Merge "Add junitOutput flag to virtlet tests"
diff --git a/tcp_tests/fixtures/salt_fixtures.py b/tcp_tests/fixtures/salt_fixtures.py
index aff28dc..58c8509 100644
--- a/tcp_tests/fixtures/salt_fixtures.py
+++ b/tcp_tests/fixtures/salt_fixtures.py
@@ -35,7 +35,7 @@
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.salt_deployed)
 @pytest.fixture(scope='function')
 def salt_deployed(revert_snapshot, request, config,
-                  hardware, underlay, salt_actions):
+                  hardware, underlay, salt_actions, grab_versions, snapshot):
     """Fixture to get or install salt service on environment
 
     :param revert_snapshot: fixture that reverts snapshot that is specified
diff --git a/tcp_tests/fixtures/underlay_fixtures.py b/tcp_tests/fixtures/underlay_fixtures.py
index eacbec9..7502df2 100644
--- a/tcp_tests/fixtures/underlay_fixtures.py
+++ b/tcp_tests/fixtures/underlay_fixtures.py
@@ -110,7 +110,7 @@
     return None
 
 
-@pytest.fixture(scope='function', autouse=True)
+@pytest.fixture(scope='function')
 def snapshot(request, hardware):
     """Fixture for creating snapshot at the end of test if it's needed
 
@@ -198,7 +198,7 @@
     return underlay
 
 
-@pytest.fixture(scope='function', autouse=True)
+@pytest.fixture(scope='function')
 def grab_versions(request, func_name, underlay):
     """Fixture for grab package versions at the end of test
 
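
Note: "snapshot" and "grab_versions" are no longer autouse, so they now run only for tests and fixtures that request them by name, which is why "salt_deployed" above lists them explicitly. A minimal sketch of the mechanics (the fixture body is a hypothetical stand-in):

    import pytest

    @pytest.fixture(scope='function')
    def grab_versions(request):
        # No longer autouse: the finalization below runs only for tests
        # that request this fixture explicitly.
        yield
        print("collecting package versions for %s" % request.node.name)

    def test_with_versions(grab_versions):  # opts in to version grabbing
        assert True

    def test_without_versions():            # skips it entirely
        assert True
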
diff --git a/tcp_tests/managers/jenkins/__init__.py b/tcp_tests/managers/jenkins/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tcp_tests/managers/jenkins/__init__.py
diff --git a/tcp_tests/managers/jenkins/client.py b/tcp_tests/managers/jenkins/client.py
new file mode 100644
index 0000000..f781305
--- /dev/null
+++ b/tcp_tests/managers/jenkins/client.py
@@ -0,0 +1,65 @@
+import time
+
+import jenkins
+
+from devops.helpers import helpers
+
+
+class JenkinsClient(object):
+
+    def __init__(self, host=None, username=None, password=None):
+        host = host or 'http://172.16.44.33:8081'
+        username = username or 'admin'
+        password = password or 'r00tme'
+        self.__client = jenkins.Jenkins(
+            host,
+            username=username,
+            password=password)
+
+    def jobs(self):
+        return self.__client.get_jobs()
+
+    def find_jobs(self, name):
+        return filter(lambda x: name in x['fullname'], self.jobs())
+
+    def job_info(self, name):
+        return self.__client.get_job_info(name)
+
+    def list_builds(self, name):
+        return self.job_info(name).get('builds')
+
+    def build_info(self, name, build_id):
+        return self.__client.get_build_info(name, build_id)
+
+    def job_params(self, name):
+        job = self.job_info(name)
+        job_params = next(
+            p for p in job['property'] if
+            'hudson.model.ParametersDefinitionProperty' == p['_class'])
+        job_params = job_params['parameterDefinitions']
+        return job_params
+
+    def make_default_params(self, name):
+        job_params = self.job_params(name)
+        def_params = dict(
+            [(j['name'], j['defaultParameterValue']['value'])
+             for j in job_params])
+        return def_params
+
+    def run_build(self, name, params=None):
+        params = params or self.make_default_params(name)
+        self.__client.build_job(name, params)
+        time.sleep(10)  # wait until Jenkins registers the started build
+        build_id = self.job_info(name)['lastBuild']['number']
+        return name, build_id
+
+    def wait_end_of_build(self, name, build_id, timeout=600):
+
+        def check_finished():
+            return not self.build_info(name, build_id)['building']
+
+        helpers.wait(
+            check_finished,
+            timeout=timeout,
+            timeout_msg='Timeout waiting for job "{0}" build {1} '
+                        'to finish'.format(name, build_id))
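
A quick usage sketch for the new client. The host and credentials below are the hard-coded defaults from __init__, and the job name is hypothetical:

    from tcp_tests.managers.jenkins.client import JenkinsClient

    client = JenkinsClient(host='http://172.16.44.33:8081',
                           username='admin', password='r00tme')
    name, build_id = client.run_build('deploy_openstack')
    client.wait_end_of_build(name, build_id, timeout=600)
    # python-jenkins reports the build outcome in the 'result' field
    print(client.build_info(name, build_id)['result'])
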
diff --git a/tcp_tests/managers/openstack_manager.py b/tcp_tests/managers/openstack_manager.py
index 2780d9b..bac459a 100644
--- a/tcp_tests/managers/openstack_manager.py
+++ b/tcp_tests/managers/openstack_manager.py
@@ -12,6 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 import os
+import requests
 
 from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
 from tcp_tests import logger
@@ -39,6 +40,55 @@
         self.execute_commands(commands,
                               label='Install OpenStack services')
         self.__config.openstack.openstack_installed = True
+        h_data = self.get_horizon_data()
+        self.__config.openstack.horizon_host = h_data['horizon_host']
+        self.__config.openstack.horizon_port = h_data['horizon_port']
+        self.__config.openstack.horizon_user = h_data['horizon_user']
+        self.__config.openstack.horizon_password = h_data['horizon_password']
+        self.auth_in_horizon(
+            h_data['horizon_host'],
+            h_data['horizon_port'],
+            h_data['horizon_user'],
+            h_data['horizon_password'])
+
+    def get_horizon_data(self):
+        horizon_data = {}
+        tgt = 'I@nginx:server and not cfg*'
+        pillar_host = ('nginx:server:site:nginx_ssl_redirect'
+                       '_openstack_web:host:name')
+        pillar_port = ('nginx:server:site:nginx_ssl_redirect'
+                       '_openstack_web:host:port')
+        hosts = self._salt.get_pillar(tgt=tgt, pillar=pillar_host)
+        host = set([ip for item in hosts for node, ip
+                    in item.items() if ip])
+        if host:
+            host = host.pop()
+        ports = self._salt.get_pillar(tgt=tgt, pillar=pillar_port)
+
+        port = set([port for item in ports for node, port
+                    in item.items() if port])
+        if port:
+            port = port.pop()
+        tgt = 'I@keystone:server and ctl01*'
+        pillar_user = 'keystone:server:admin_name'
+        pillar_password = 'keystone:server:admin_password'
+        users = self._salt.get_pillar(tgt=tgt, pillar=pillar_user)
+        user = set([user for item in users for node, user
+                    in item.items() if user])
+        if user:
+            user = user.pop()
+        passwords = self._salt.get_pillar(tgt=tgt, pillar=pillar_password)
+        pwd = set([pwd for item in passwords for node, pwd
+                   in item.items() if pwd])
+        if pwd:
+            pwd = pwd.pop()
+        horizon_data.update({'horizon_host': host})
+        horizon_data.update({'horizon_port': port})
+        horizon_data.update({'horizon_user': user})
+        horizon_data.update({'horizon_password': pwd})
+        LOG.info("Data from pillars {}".format(horizon_data))
+
+        return horizon_data
 
     def run_tempest(
             self,
@@ -121,3 +171,37 @@
         LOG.info('Reboot (warm restart) nodes {0}'.format(node_names))
         self.warm_shutdown_openstack_nodes(node_names, timeout=timeout)
         self.warm_start_nodes(node_names)
+
+    def auth_in_horizon(self, host, port, user, password):
+        client = requests.session()
+        url = "http://{0}:{1}".format(host, port)
+        # Retrieve the CSRF token first; this GET sets the csrftoken cookie
+        client.get(url, verify=False)
+        login_data = dict(username=user, password=password, next='/')
+        csrftoken = client.cookies.get('csrftoken', None)
+        if csrftoken:
+            login_data['csrfmiddlewaretoken'] = csrftoken
+        resp = client.post(url, data=login_data,
+                           headers=dict(Referer=url), verify=False)
+        LOG.debug("Horizon resp {}".format(resp))
+        assert 200 == resp.status_code, ("Failed to auth in "
+                                         "horizon. Response "
+                                         "{0}".format(resp.status_code))
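
All four pillar lookups in get_horizon_data follow the same pattern: get_pillar returns a list of {node: value} dicts, which is collapsed to the single distinct value through a set. The pattern in isolation (sample data is illustrative):

    def flatten_pillar(results):
        # Collapse [{node: value}, ...] to one value, ignoring empty results.
        values = set(v for item in results for _node, v in item.items() if v)
        return values.pop() if values else None

    hosts = [{'prx01.local': '10.167.4.121'}, {'prx02.local': '10.167.4.121'}]
    assert flatten_pillar(hosts) == '10.167.4.121'
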
diff --git a/tcp_tests/managers/rallymanager.py b/tcp_tests/managers/rallymanager.py
index fc56afa..ae72941 100644
--- a/tcp_tests/managers/rallymanager.py
+++ b/tcp_tests/managers/rallymanager.py
@@ -173,7 +173,8 @@
     # Updated to replace the OpenStackManager method run_tempest
     def run_tempest(self, conf_name='/var/lib/lvm_mcp.conf',
                     pattern='set=smoke', concurrency=0, timeout=None,
-                    report_prefix='', report_types=None):
+                    report_prefix='', report_types=None,
+                    designate_plugin=True):
         """Run tempest tests
 
         :param conf_name: tempest config placed in the rally container
@@ -182,6 +183,7 @@
                             to take the amount of the cores on the node
                             <self._node_name>.
         :param timeout: stop tempest tests after specified timeout.
+        :param designate_plugin: install the designate tempest plugin as a
+                                 verifier extension (True by default)
         :param report_prefix: str, prefix for report filenames. Usually the
                               output of the fixture 'func_name'
         :param report_types: list of the report types that need to download
@@ -189,23 +191,36 @@
                              None by default.
         """
         report_types = report_types or []
-
-        cmd = (
-            "cat > /root/rally/install_tempest.sh << EOF\n"
-            "rally verify create-verifier"
-            "  --type tempest "
-            "  --name tempest-verifier"
-            "  --source /var/lib/tempest"
-            "  --version {tempest_tag}"
-            "  --system-wide\n"
-            "rally verify add-verifier-ext"
-            "  --source /var/lib/designate-tempest-plugin"
-            "  --version {designate_tag}\n"
-            "rally verify configure-verifier --extend {tempest_conf}\n"
-            "rally verify configure-verifier --show\n"
-            "EOF".format(tempest_tag=self.tempest_tag,
-                         designate_tag=self.designate_tag,
-                         tempest_conf=conf_name))
+        if not designate_plugin:
+            cmd = (
+                "cat > /root/rally/install_tempest.sh << EOF\n"
+                "rally verify create-verifier"
+                "  --type tempest "
+                "  --name tempest-verifier"
+                "  --source /var/lib/tempest"
+                "  --version {tempest_tag}"
+                "  --system-wide\n"
+                "rally verify configure-verifier --extend {tempest_conf}\n"
+                "rally verify configure-verifier --show\n"
+                "EOF".format(tempest_tag=self.tempest_tag,
+                             tempest_conf=conf_name))
+        else:
+            cmd = (
+                "cat > /root/rally/install_tempest.sh << EOF\n"
+                "rally verify create-verifier"
+                "  --type tempest "
+                "  --name tempest-verifier"
+                "  --source /var/lib/tempest"
+                "  --version {tempest_tag}"
+                "  --system-wide\n"
+                "rally verify add-verifier-ext"
+                "  --source /var/lib/designate-tempest-plugin"
+                "  --version {designate_tag}\n"
+                "rally verify configure-verifier --extend {tempest_conf}\n"
+                "rally verify configure-verifier --show\n"
+                "EOF".format(tempest_tag=self.tempest_tag,
+                             designate_tag=self.designate_tag,
+                             tempest_conf=conf_name))
         with self._underlay.remote(node_name=self._node_name) as remote:
             LOG.info("Create install_tempest.sh")
             remote.check_call(cmd)
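
The two branches above differ only in the add-verifier-ext lines, so the same script could be assembled once; a sketch (the version tags and config path are illustrative):

    def build_verifier_script(tempest_tag, designate_tag, tempest_conf,
                              designate_plugin=True):
        lines = [
            "cat > /root/rally/install_tempest.sh << EOF",
            "rally verify create-verifier"
            "  --type tempest "
            "  --name tempest-verifier"
            "  --source /var/lib/tempest"
            "  --version {0}"
            "  --system-wide".format(tempest_tag),
        ]
        if designate_plugin:
            # The only difference between the two branches.
            lines.append("rally verify add-verifier-ext"
                         "  --source /var/lib/designate-tempest-plugin"
                         "  --version {0}".format(designate_tag))
        lines.extend([
            "rally verify configure-verifier --extend {0}".format(tempest_conf),
            "rally verify configure-verifier --show",
            "EOF"])
        return "\n".join(lines)

    print(build_verifier_script('15.0.0', '0.2.0', '/var/lib/lvm_mcp.conf',
                                designate_plugin=False))
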
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 1ff5324..06e7d0b 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -38,14 +38,15 @@
         'runStates': 'run_states',
     }
 
-    def __init__(self, config, underlay, host=None, port='6969'):
+    def __init__(self, config, underlay, host=None, port='6969',
+                 username=None, password=None):
         self.__config = config
         self.__underlay = underlay
         self.__port = port
         self.__host = host
         self.__api = None
-        self.__user = settings.SALT_USER
-        self.__password = settings.SALT_PASSWORD
+        self.__user = username or settings.SALT_USER
+        self.__password = password or settings.SALT_PASSWORD
         self._salt = self
 
         super(SaltManager, self).__init__(config=config, underlay=underlay)
@@ -60,6 +61,10 @@
         self.execute_commands(commands=commands,
                               label="Install and configure salt")
 
+    def change_creds(self, username, password):
+        self.__user = username
+        self.__password = password
+
     @property
     def port(self):
         return self.__port
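
The constructor fallback and change_creds() reduced to their essentials (a sketch; the real class also wires in config and underlay):

    SALT_USER, SALT_PASSWORD = 'salt', 'hovno12345!'  # settings defaults

    class Creds(object):
        def __init__(self, username=None, password=None):
            # Explicit arguments win; otherwise fall back to settings.
            self.user = username or SALT_USER
            self.password = password or SALT_PASSWORD

        def change_creds(self, username, password):
            # Rotate credentials, e.g. after a deploy resets salt-api auth.
            self.user = username
            self.password = password

    c = Creds()                        # settings defaults
    c.change_creds('admin', 's3cr3t')  # switch later
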
diff --git a/tcp_tests/managers/sl_manager.py b/tcp_tests/managers/sl_manager.py
index a1ee3a7..68ff0de 100644
--- a/tcp_tests/managers/sl_manager.py
+++ b/tcp_tests/managers/sl_manager.py
@@ -93,8 +93,8 @@
         target_node_name = [node_name for node_name
                             in self.__underlay.node_names()
                             if node_to_run in node_name]
-        cmd = ("cd {0}; "
-               ". venv-stacklight-pytest/bin/activate;"
+        cmd = (". venv-stacklight-pytest/bin/activate;"
+               "cd {0}; "
                "export VOLUME_STATUS='available';"
                "pytest -k {1} {2}".format(
                    tests_path,
@@ -105,7 +105,7 @@
                 as node_remote:
             LOG.debug("Run {0} on the node {1}".format(
                 cmd, target_node_name[0]))
-            result = node_remote.execute(cmd)
+            result = node_remote.check_call(cmd, verbose=True)
             LOG.debug("Test execution result is {}".format(result))
         return result
 
@@ -114,8 +114,8 @@
         target_node_name = [node_name for node_name
                             in self.__underlay.node_names()
                             if node_to_run in node_name]
-        cmd = ("cd {0}; "
-               ". venv-stacklight-pytest/bin/activate;"
+        cmd = (". venv-stacklight-pytest/bin/activate;"
+               "cd {0}; "
                "export VOLUME_STATUS='available';"
                "pip install pytest-json;"
                "pytest --json=report.json -k {1} {2}".format(
@@ -127,9 +127,9 @@
                 as node_remote:
             LOG.debug("Run {0} on the node {1}".format(
                 cmd, target_node_name[0]))
-            node_remote.execute(cmd)
-            res = node_remote.execute('cd {0}; cat report.json'.format(
-                tests_path))
+            node_remote.check_call(cmd, verbose=True)
+            res = node_remote.check_call('cd {0}; cat report.json'.format(
+                tests_path), verbose=True)
             LOG.debug("Test execution result is {}".format(res['stdout']))
             result = json.loads(res['stdout'][0])
         return result['report']['tests']
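
The switch from execute() to check_call() matters because execute() returns normally even when the remote command exits non-zero, so a broken pytest run went unnoticed; check_call() raises instead. A rough model of that behavior (the result-dict keys are assumptions based on how res['stdout'] is used above):

    def check_call(run, cmd, verbose=False):
        result = run(cmd)
        if verbose:
            print('\n'.join(result['stdout']))
        if result['exit_code'] != 0:
            raise RuntimeError("{0!r} exited with {1}".format(
                cmd, result['exit_code']))
        return result

    fake_run = lambda cmd: {'exit_code': 1, 'stdout': ['1 failed']}
    try:
        check_call(fake_run, 'pytest -k test_alerts', verbose=True)
    except RuntimeError as exc:
        print(exc)  # the failure now surfaces instead of being ignored
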
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 7d3da96..5194239 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -247,13 +247,16 @@
         """
         ssh_data = self.__ssh_data(node_name=node_name, host=host,
                                    address_pool=address_pool)
+        ssh_auth = ssh_client.SSHAuth(
+            username=ssh_data['login'],
+            password=ssh_data['password'],
+            keys=[rsakey.RSAKey(file_obj=StringIO.StringIO(key))
+                  for key in ssh_data['keys']])
+
         return ssh_client.SSHClient(
             host=ssh_data['host'],
             port=ssh_data['port'] or 22,
-            username=ssh_data['login'],
-            password=ssh_data['password'],
-            private_keys=[rsakey.RSAKey(file_obj=StringIO.StringIO(key))
-                          for key in ssh_data['keys']])
+            auth=ssh_auth)
 
     def local(self):
         """Get Subprocess instance for local operations like:
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index 381be12..b06b805 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -19,3 +19,4 @@
 setuptools<=36.2.0
 netaddr
 mock>=1.2
+python-jenkins
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index a1b296a..f90000c 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -52,9 +52,10 @@
 SALT_PASSWORD = os.environ.get('SALT_PASSWORD', 'hovno12345!')
 
 DOCKER_REGISTRY = os.environ.get('DOCKER_REGISTRY',
-                                 'docker-prod-virtual.docker.mirantis.net')
+                                 'docker-prod-local.artifactory.mirantis.com')
 DOCKER_NAME = os.environ.get('DOCKER_NAME',
                              'mirantis/oscore/rally-tempest:latest')
+DOCKER_IMAGES_SL_TAG = os.environ.get('DOCKER_IMAGES_SL_TAG', 'latest')
 
 PATTERN = os.environ.get('PATTERN', None)
 RUN_TEMPEST = get_var_as_bool('RUN_TEMPEST', False)
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index dacdb22..ad4cce3 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -174,6 +174,14 @@
     ct.Cfg('openstack_steps_path', ct.String(),
            help="Path to YAML with steps to deploy openstack",
            default=_default_openstack_steps),
+    ct.Cfg('horizon_host', ct.IPAddress(),
+           help="IP address of the Horizon endpoint", default='0.0.0.0'),
+    ct.Cfg('horizon_port', ct.String(),
+           help="Port of the Horizon endpoint", default='5000'),
+    ct.Cfg('horizon_user', ct.String(),
+           help="Username for Horizon authentication", default='admin'),
+    ct.Cfg('horizon_password', ct.String(),
+           help="Password for Horizon authentication", default='workshop'),
 ]
 openstack_opts = [
     ct.Cfg('openstack_installed', ct.Boolean(),
@@ -197,20 +205,26 @@
            help="Path to YAML with steps to deploy sl",
            default=_default_sl_prepare_tests_steps_path),
     ct.Cfg('docker_image_alertmanager', ct.String(),
-           default='{}/openstack-docker/alertmanager:latest'.format(
-               settings.DOCKER_REGISTRY)),
+           default='{0}/openstack-docker/alertmanager:{1}'.format(
+               settings.DOCKER_REGISTRY, settings.DOCKER_IMAGES_SL_TAG)),
     ct.Cfg('docker_image_pushgateway', ct.String(),
-           default='{}/openstack-docker/pushgateway:latest'.format(
-               settings.DOCKER_REGISTRY)),
+           default='{0}/openstack-docker/pushgateway:{1}'.format(
+               settings.DOCKER_REGISTRY, settings.DOCKER_IMAGES_SL_TAG)),
     ct.Cfg('docker_image_prometheus', ct.String(),
-           default='{}/openstack-docker/prometheus:latest'.format(
-               settings.DOCKER_REGISTRY)),
+           default='{0}/openstack-docker/prometheus:{1}'.format(
+               settings.DOCKER_REGISTRY, settings.DOCKER_IMAGES_SL_TAG)),
     ct.Cfg('docker_image_remote_agent', ct.String(),
-           default='{}/openstack-docker/telegraf:latest'.format(
-               settings.DOCKER_REGISTRY)),
+           default='{0}/openstack-docker/telegraf:{1}'.format(
+               settings.DOCKER_REGISTRY, settings.DOCKER_IMAGES_SL_TAG)),
     ct.Cfg('docker_image_remote_storage_adapter', ct.String(),
-           default='{}/openstack-docker/remote_storage_adapter:latest'.format(
-               settings.DOCKER_REGISTRY)),
+           default='{0}/openstack-docker/remote_storage_adapter:{1}'.format(
+               settings.DOCKER_REGISTRY, settings.DOCKER_IMAGES_SL_TAG)),
+    ct.Cfg('docker_image_prometheus_relay', ct.String(),
+           default='{0}/openstack-docker/prometheus_relay:{1}'.format(
+               settings.DOCKER_REGISTRY, settings.DOCKER_IMAGES_SL_TAG)),
+    ct.Cfg('docker_image_grafana', ct.String(),
+           default='{0}/mirantis/external/grafana:{1}'.format(
+               settings.DOCKER_REGISTRY, settings.DOCKER_IMAGES_SL_TAG)),
     # SalesForce connection options for pushkin
     ct.Cfg('sfdc_sandbox_enabled', ct.String(), default='False'),
     ct.Cfg('sfdc_auth_url', ct.String(), default=''),
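
How the new tag knob composes with the registry switch, in isolation: every Stacklight image default is now derived from DOCKER_REGISTRY plus DOCKER_IMAGES_SL_TAG instead of a hard-coded ':latest', so one environment variable pins all images at once. For example:

    import os

    DOCKER_REGISTRY = os.environ.get(
        'DOCKER_REGISTRY', 'docker-prod-local.artifactory.mirantis.com')
    DOCKER_IMAGES_SL_TAG = os.environ.get('DOCKER_IMAGES_SL_TAG', 'latest')

    image = '{0}/openstack-docker/prometheus:{1}'.format(
        DOCKER_REGISTRY, DOCKER_IMAGES_SL_TAG)
    # -> docker-prod-local.artifactory.mirantis.com/openstack-docker/prometheus:latest
    print(image)
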
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
index 85e530d..342e4c8 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
@@ -171,7 +171,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
index 019b590..bcf5dbb 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
@@ -58,6 +58,8 @@
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml') }}
+
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 
 {{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
index 41bfc9a..ed3636c 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
@@ -18,105 +18,105 @@
   skip_fail: false
 
 - description: Configure docker service
-  cmd: salt -C 'I@docker:swarm' state.sls docker.host
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install docker swarm on master node
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Send grains to the swarm slave nodes
-  cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Update mine
-  cmd: salt -C 'I@docker:swarm' mine.update
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Refresh modules
-  cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Rerun swarm on slaves to proper token population
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Configure slave nodes
-  cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  List registered Docker swarm nodes
-  cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 # Install slv2 infra
 - description: Install telegraf
-  cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
 - description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
   cmd: |
-    if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
-      salt -C 'I@prometheus:exporters' state.sls prometheus
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure fluentd
-  cmd: salt -C 'I@fluentd:agent' state.sls fluentd
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch server
-  cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana server
-  cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch client
-  cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana client
-  cmd: salt -C 'I@kibana:client' state.sls kibana.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Check influxdb
   cmd: |
-    INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
     echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
     if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt -C 'I@influxdb:server' state.sls influxdb
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
@@ -125,44 +125,44 @@
 # Collect grains needed to configure the services
 
 - description: Get grains
-  cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Sync modules
-  cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Update mine
-  cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 # Configure the services running in Docker Swarm
 - description: Install prometheus alertmanager
-  cmd: salt -C 'I@docker:swarm' state.sls prometheus -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:server' state.sls prometheus -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
 - description: run docker state
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 120}
   skip_fail: false
 
 - description: docker ps
-  cmd: sleep 120; salt -C 'I@docker:swarm' dockerng.ps
+  cmd: sleep 120; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure Grafana dashboards and datasources
-  cmd: sleep 60;  salt -C 'I@grafana:client' state.sls grafana.client
+  cmd: sleep 60;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
index a910fa8..6026c30 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
@@ -18,8 +18,8 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   #   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -49,36 +49,36 @@
 
    ############## TCP Cloud cfg01 node ##################
    #- sleep 120
-   - echo "Preparing base OS"
+   #   - echo "Preparing base OS"
 
    - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+   #   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
 
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+   #   # Configure Ubuntu mirrors
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
 
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
 
-   - apt-get clean
-   - apt-get update
+   #   - apt-get clean
+   #   - apt-get update
 
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+   #   # Install common packages
+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
 
-   # Install salt-minion and stop it until it is configured
-   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+   #   # Install salt-minion and stop it until it is configured
+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
 
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   #   ########################################################
+   #   # Node is ready, allow SSH access
+   #   - echo "Allow SSH access ..."
+   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   #   ########################################################
 
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
index f4d3775..a0b1c88 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
@@ -18,8 +18,8 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   #   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -50,38 +50,38 @@
 
    ############## TCP Cloud cfg01 node ##################
    #- sleep 120
-   - echo "Preparing base OS"
+   #   - echo "Preparing base OS"
    - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+   #   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
 
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+   #   # Configure Ubuntu mirrors
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
 
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
 
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
+   #   - apt-get clean
+   #   - eatmydata apt-get update && apt-get -y upgrade
 
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+   #   # Install common packages
+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
 
-   # Install salt-minion and stop it until it is configured
-   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+   #   # Install salt-minion and stop it until it is configured
+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
 
-   # Install latest kernel
-   - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
+   #   # Install latest kernel
+   #   - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
 
    ########################################################
    # Node is ready, allow SSH access
    #- echo "Allow SSH access ..."
    #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   - reboot
+   #   - reboot
    ########################################################
 
   write_files:
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
index 1176d0d..49f28dd 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
@@ -18,8 +18,8 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   #   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -50,34 +50,34 @@
 
    ############## TCP Cloud cfg01 node ##################
    #- sleep 120
-   - echo "Preparing base OS"
+   #   - echo "Preparing base OS"
    - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+   #   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
 
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+   #   # Configure Ubuntu mirrors
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
 
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
 
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
+   #   - apt-get clean
+   #   - eatmydata apt-get update && apt-get -y upgrade
 
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+   #   # Install common packages
+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
 
-   # Install salt-minion and stop it until it is configured
-   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+   #   # Install salt-minion and stop it until it is configured
+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
 
    ########################################################
    # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   #   - echo "Allow SSH access ..."
+   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
    ########################################################
 
   write_files:
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
index d20e7c2..0de3fd7 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
@@ -211,7 +211,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
index f3e121d..dd847c4 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
@@ -76,6 +76,8 @@
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml') }}
+
 - description: "Workaround for rack01 compute generator"
   cmd: |
     set -e;
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
index 9e14bf2..8d131b7 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
@@ -18,105 +18,105 @@
   skip_fail: false
 
 - description: Configure docker service
-  cmd: salt -C 'I@docker:swarm' state.sls docker.host
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install docker swarm on master node
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Send grains to the swarm slave nodes
-  cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Update mine
-  cmd: salt -C 'I@docker:swarm' mine.update
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Refresh modules
-  cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Rerun swarm on slaves to proper token population
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Configure slave nodes
-  cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  List registered Docker swarm nodes
-  cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 # Install slv2 infra
 - description: Install telegraf
-  cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
 - description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
   cmd: |
-    if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
-      salt -C 'I@prometheus:exporters' state.sls prometheus
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure collector
-  cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch server
-  cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana server
-  cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch client
-  cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana client
-  cmd: salt -C 'I@kibana:client' state.sls kibana.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Check influxdb
   cmd: |
-    INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
     echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
     if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt -C 'I@influxdb:server' state.sls influxdb
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
@@ -125,44 +125,44 @@
 # Collect grains needed to configure the services
 
 - description: Get grains
-  cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Sync modules
-  cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Update mine
-  cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 # Configure the services running in Docker Swarm
 - description: Install prometheus alertmanager
-  cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
 - description: run docker state
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 120}
   skip_fail: false
 
 - description: docker ps
-  cmd: sleep 120; salt -C 'I@docker:swarm' dockerng.ps
+  cmd: sleep 120; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure Grafana dashboards and datasources
-  cmd: sleep 60;  salt -C 'I@grafana:client' state.sls grafana.client
+  cmd: sleep 60;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
index c6314ad..2bb48f0 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
@@ -18,8 +18,8 @@
   expire: False
 
  bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   #   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
   # Enable root access
   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
   - service sshd restart
@@ -49,35 +49,35 @@
 

    ############## TCP Cloud cfg01 node ##################

    #- sleep 120

-   - echo "Preparing base OS"

+   #   - echo "Preparing base OS"

 

    - echo "nameserver 172.18.208.44" > /etc/resolv.conf;

-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;

-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);

+   #   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;

+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);

 

    # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
 
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;

-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;

-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;

-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;

+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;

+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;

+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;

+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;

 

-   - apt-get clean

-   - apt-get update

+   #   - apt-get clean

+   #   - apt-get update

 

    # Install common packages

-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc

+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc

 

    # Install salt-minion and stop it until it is configured

-   - eatmydata apt-get install -y salt-minion && service salt-minion stop

+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop

 

    ########################################################

    # Node is ready, allow SSH access

-   - echo "Allow SSH access ..."

-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP

+   #   - echo "Allow SSH access ..."

+   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP

    ########################################################

 

   write_files:

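
The hunks above comment out the node-preparation bootcmd steps, including the SSH gating these user-data templates used: an iptables DROP rule on port 22 is installed at the start of bootcmd and deleted once the node is ready. A minimal Python sketch of that gating pattern, with illustrative names only (gate_ssh and provision are not repository code):

    import subprocess

    def ssh_rule(action):
        # '-A' appends the DROP rule before provisioning, '-D' deletes it after.
        subprocess.check_call(
            ['iptables', action, 'INPUT', '-p', 'tcp', '--dport', '22', '-j', 'DROP'])

    def gate_ssh(provision):
        ssh_rule('-A')      # block SSH while the node is preparing
        try:
            provision()     # repository setup and package installation steps
        finally:
            ssh_rule('-D')  # allow SSH access again, even if provisioning failed

Unlike this sketch, cloud-init offers no finally-style guarantee: if the cleanup command never runs, the DROP rule persists and the node stays unreachable over SSH.
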
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604-hwe.yaml
index e755c22..07a6936 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604-hwe.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604-hwe.yaml
@@ -18,8 +18,8 @@
   expire: False
 
  bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   #   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
   # Enable root access
   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
   - service sshd restart
@@ -50,38 +50,38 @@
 
   ############## TCP Cloud cfg01 node ##################
   #- sleep 120
-   - echo "Preparing base OS"
+   #   - echo "Preparing base OS"
   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+   #   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
 
   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
 
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
 
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
+   #   - apt-get clean
+   #   - eatmydata apt-get update && apt-get -y upgrade
 
   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
 
   # Install salt-minion and stop it until it is configured
-   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
 
   # Install latest kernel
-   - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
+   #   - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
 
   ########################################################
   # Node is ready, allow SSH access
   #- echo "Allow SSH access ..."
   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   - reboot
+   #   - reboot
   ########################################################
 
  write_files:
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml
index 4891685..e8e6345 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml
@@ -19,7 +19,7 @@
 
  bootcmd:
   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
   # Enable root access
   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
   - service sshd restart
@@ -50,34 +50,34 @@
 
   ############## TCP Cloud cfg01 node ##################
   #- sleep 120
-   - echo "Preparing base OS"
+   #   - echo "Preparing base OS"
   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+   #   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
 
   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
 
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
 
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
+   #   - apt-get clean
+   #   - eatmydata apt-get update && apt-get -y upgrade
 
   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
 
   # Install salt-minion and stop it until it is configured
-   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
 
   ########################################################
   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   #   - echo "Allow SSH access ..."
+   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
   ########################################################
 
  write_files:
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
index f8dd91c..8c214e3 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
@@ -179,7 +179,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
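
With skip_fail flipped to false, a failing 'cmp*' state.apply now aborts the deployment instead of being silently tolerated. A rough model of how a runner could interpret these step fields (the dict shape mirrors the steps in this file; run_cmd is a hypothetical stand-in that returns an exit code):

    import time

    def execute(step, run_cmd):
        # Retry according to the step's retry policy.
        for _ in range(step['retry']['count']):
            if run_cmd(step['cmd']) == 0:
                return True
            time.sleep(step['retry']['delay'])
        if step.get('skip_fail', False):
            return False  # failure tolerated, deployment continues
        raise RuntimeError('Step failed: ' + step['cmd'])  # aborts the run

    step = {'cmd': "salt 'cmp*' state.apply",
            'retry': {'count': 1, 'delay': 5},
            'skip_fail': False}
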
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
index c69013e..f26d7ee 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
@@ -213,7 +213,7 @@
 #--------
 
 - description: "Waiting for Jenkins to come up in container..."
-  cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@jenkins:client' cmd.run
+  cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:client:stack:jenkins' cmd.run
       'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
        export JENKINS_CLIENT_USER=$(salt-call --out=newline_values_only pillar.get _param:jenkins_client_user);
        export JENKINS_CLIENT_PASSWORD=$(salt-call --out=newline_values_only pillar.get _param:jenkins_client_password);
@@ -237,7 +237,7 @@
 - description: "Waiting for postgresql database to come up in container..."
 #  cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' cmd.run
 #      'while true; do if docker service logs postgresql_db | grep -q "ready to accept"; then break; else sleep 5; fi; done'
-  cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' cmd.run
+  cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:client:stack:postgresql' cmd.run
       'while true; do if docker service logs postgresql_postgresql-db | grep -q "ready to accept"; then break; else sleep 5; fi; done'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 10}
@@ -247,7 +247,7 @@
                 "1. State postgresql.client cannot insert values into 'pushkin' database because it is created empty,\n"
                 "2. Container with Pushkin cannot start and fill the database scheme until state postgresql.client created users.")
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' state.sls postgresql.client -b 1 &&
-    timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' cmd.run
+    timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:client:stack:postgresql' cmd.run
     'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
      while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:8887/apps && break; sleep 2; done'
   node_name: {{ HOSTNAME_CFG01 }}
@@ -258,7 +258,7 @@
 #--------
 
 - description: Waiting for Rundeck to come up in container...
-  cmd: timeout 30 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@rundeck:client' cmd.run
+  cmd: timeout 30 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:client:stack:rundeck' cmd.run
       'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
        while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:4440 && break; sleep 2; done'
   node_name: {{ HOSTNAME_CFG01 }}
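
The wait steps in this file now target 'I@docker:client:stack:jenkins' (and the postgresql and rundeck analogues) rather than the service-client pillars, so they run only on minions whose docker:client pillar actually defines the corresponding stack. Salt's 'I@' pillar matcher walks a colon-separated path; a simplified model of the key-path case (the sample pillar data is invented):

    def pillar_match(pillar, path):
        """True if every key of the colon-separated path exists in the pillar."""
        node = pillar
        for key in path.split(':'):
            if not isinstance(node, dict) or key not in node:
                return False
            node = node[key]
        return True

    pillar = {'docker': {'client': {'stack': {'jenkins': {'enabled': True}}}}}
    print(pillar_match(pillar, 'docker:client:stack:jenkins'))  # True
    print(pillar_match(pillar, 'jenkins:client'))               # False
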
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
index bd34cc8..2a86269 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
@@ -25,6 +25,8 @@
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml') }}
+
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
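
The new SHARED.ADJUST_SL_OPTS call replaces the inline "Set SL docker images deploy parameters" step (removed from sl.yaml below) and points it at the environment model's overrides.yml. In essence the macro renders the same salt-call loop as this sketch (the sample option and inventory name are illustrative):

    # Assumed shape of config.sl_deploy: {option_name: value, ...}
    sl_deploy = {'docker_image_prometheus': 'prometheus:latest'}  # example value
    overrides = ('/srv/salt/reclass/classes/environment/'
                 'deploy-env-name/overrides.yml')  # ENVIRONMENT_MODEL_INVENTORY_NAME

    for sl_opt, value in sl_deploy.items():
        if str(value):  # skip empty options
            print('salt-call reclass.cluster_meta_set name={} value={} '
                  'file_name={}'.format(sl_opt, value, overrides))
    print("salt '*' saltutil.refresh_pillar")
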
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
index 62c538f..962035c 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
@@ -19,105 +19,105 @@
   skip_fail: false
 
 - description: Configure docker service
-  cmd: salt -C 'I@docker:swarm' state.sls docker.host
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install docker swarm on master node
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Send grains to the swarm slave nodes
-  cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Update mine
-  cmd: salt -C 'I@docker:swarm' mine.update
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Refresh modules
-  cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Rerun swarm on slaves for proper token population
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Configure slave nodes
-  cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  List registered Docker swarm nodes
-  cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 # Install slv2 infra
 - description: Install telegraf
-  cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
 - description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
   cmd: |
-    if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
-      salt -C 'I@prometheus:exporters' state.sls prometheus
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure fluentd
-  cmd: salt -C 'I@fluentd:agent' state.sls fluentd
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch server
-  cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana server
-  cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch client
-  cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana client
-  cmd: salt -C 'I@kibana:client' state.sls kibana.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Check influx db
   cmd: |
-    INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
     echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
     if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt -C 'I@influxdb:server' state.sls influxdb
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
@@ -126,60 +126,44 @@
 # Collect grains needed to configure the services
 
 - description: Get grains
-  cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Sync modules
-  cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Update mine
-  cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-# Change environment configuration before deploy
-- description: Set SL docker images deploy parameters
-  cmd: |
-  {#- For cookiecutter-generated model, use overrides.yml from environment model instead of cluster model #}
-  {%- set OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml' %}
-  {%- for sl_opt, value in config.sl_deploy.items() %}
-    {%- if value|string() %}
-    salt-call reclass.cluster_meta_set name={{ sl_opt }} value={{ value }} file_name={{ OVERRIDES_FILENAME }};
-    {%- endif %}
-  {%- endfor %}
-    salt '*' saltutil.refresh_pillar;
-    sleep 10
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
 # Configure the services running in Docker Swarm
 - description: Install prometheus alertmanager
-  cmd: salt -C 'I@docker:swarm' state.sls prometheus -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:server' state.sls prometheus -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: run docker state
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: docker ps
-  cmd: salt -C 'I@docker:swarm' dockerng.ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure Grafana dashboards and datasources
-  cmd: sleep 30;  salt -C 'I@grafana:client' state.sls grafana.client
+  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
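
Every salt call in this file gains the same three flags, matching the convention already used in openstack.yaml and oss.yaml: --hard-crash makes salt raise the original exception instead of exiting gracefully, while --state-output=mixed and --state-verbose=False restrict full state output to failures. The one functional change is the alertmanager step, which now targets 'I@prometheus:server' instead of the whole swarm. A tiny helper showing the composed command (SALT_OPTS and salt_cmd are illustrative, not repository code):

    SALT_OPTS = '--hard-crash --state-output=mixed --state-verbose=False'

    def salt_cmd(target, command):
        """Compose a salt CLI call in the style used by these steps."""
        return "salt {} -C '{}' {}".format(SALT_OPTS, target, command)

    print(salt_cmd('I@docker:swarm', 'state.sls docker.host'))
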
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml
index 2ac0aa1..3e70fd8 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -47,38 +45,7 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
 
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604-hwe.yaml
index 9990f34..319c007 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604-hwe.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604-hwe.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -47,41 +45,6 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
 
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   # Install latest kernel
-   - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #- echo "Allow SSH access ..."
-   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   - reboot
-   ########################################################
-
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml
index d5719b4..319c007 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -47,37 +45,6 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
 
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
index bae02cc..3882976 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
@@ -179,7 +179,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
index 45514bb..0ad8e5e 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
@@ -25,6 +25,8 @@
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml') }}
+
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 
 {{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
index 0668eb7..dd64b90 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
@@ -18,105 +18,105 @@
   skip_fail: false
 
 - description: Configure docker service
-  cmd: salt -C 'I@docker:swarm' state.sls docker.host
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install docker swarm on master node
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Send grains to the swarm slave nodes
-  cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Update mine
-  cmd: salt -C 'I@docker:swarm' mine.update
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Refresh modules
-  cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Rerun swarm on slaves for proper token population
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Configure slave nodes
-  cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  List registered Docker swarm nodes
-  cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 # Install slv2 infra
 - description: Install telegraf
-  cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
 - description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
   cmd: |
-    if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
-      salt -C 'I@prometheus:exporters' state.sls prometheus
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure collector
-  cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch server
-  cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana server
-  cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch client
-  cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana client
-  cmd: salt -C 'I@kibana:client' state.sls kibana.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Check influx db
   cmd: |
-    INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
     echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
     if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt -C 'I@influxdb:server' state.sls influxdb
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
@@ -125,56 +125,44 @@
 # Collect grains needed to configure the services
 
 - description: Get grains
-  cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Sync modules
-  cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Update mine
-  cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-# Change environment configuration before deploy
-- description: Set SL docker images deploy parameters
-  cmd: |
-  {% for sl_opt, value in config.sl_deploy.items() %}
-    {% if value|string() %}
-    salt-call reclass.cluster_meta_set {{ sl_opt }} {{ value }};
-    {% endif %}
-  {% endfor %}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
 # Configure the services running in Docker Swarm
 - description: Install prometheus alertmanager
-  cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: run docker state
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: docker ps
-  cmd: salt -C 'I@docker:swarm' dockerng.ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure Grafana dashboards and datasources
-  cmd: sleep 30;  salt -C 'I@grafana:client' state.sls grafana.client
+  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml
index a00d531..19ae10b 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -47,38 +45,7 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
 
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604-hwe.yaml
index 9990f34..319c007 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604-hwe.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604-hwe.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -47,41 +45,6 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
 
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   # Install latest kernel
-   - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #- echo "Allow SSH access ..."
-   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   - reboot
-   ########################################################
-
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml
index d5719b4..319c007 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -47,37 +45,6 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
 
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
index b63a123..ef002fe 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
@@ -344,7 +344,7 @@
           - name: {{ HOSTNAME_CFG01 }}
             role: salt_master
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
+              vcpu: !os_env SLAVE_NODE_CPU, 3
               memory: !os_env SLAVE_NODE_MEMORY, 4096
               boot:
                 - hd
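
The salt master VM for this lab now defaults to 3 vCPUs instead of 2, and the !os_env tag still lets SLAVE_NODE_CPU override it. Assuming the usual resolve-environment-or-default semantics of os_env in these templates, the tag behaves like:

    import os

    def os_env(name, default=None):
        # The environment variable wins; otherwise fall back to the default.
        return os.environ.get(name, default)

    vcpu = os_env('SLAVE_NODE_CPU', 3)         # new default for cfg01
    memory = os_env('SLAVE_NODE_MEMORY', 4096)
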
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
index 1dad209..24a6c8b 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
@@ -28,16 +28,16 @@
     set -e;
     . /root/venv-reclass-tools/bin/activate;
     # Remove rack01 key
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
 
     # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
 
     # Set ipaddresses for our nodes
-    reclass-tools add-key parameters._param.openstack_compute_node01_control_address 10.167.4.3 /srv/salt/reclass/classes/cluster/cookied-bm-mcp-dvr-vxlan/infra/config.yml;
-    reclass-tools add-key parameters._param.openstack_compute_node02_control_address 10.167.4.31 /srv/salt/reclass/classes/cluster/cookied-bm-mcp-dvr-vxlan/infra/config.yml;
-    reclass-tools add-key parameters._param.openstack_compute_node01_tenant_address 10.167.6.3 /srv/salt/reclass/classes/cluster/cookied-bm-mcp-dvr-vxlan/infra/config.yml;
-    reclass-tools add-key parameters._param.openstack_compute_node02_tenant_address 10.167.6.31 /srv/salt/reclass/classes/cluster/cookied-bm-mcp-dvr-vxlan/infra/config.yml;
+    reclass-tools add-key parameters._param.openstack_compute_node01_control_address 10.167.4.3 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools add-key parameters._param.openstack_compute_node02_control_address 10.167.4.31 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools add-key parameters._param.openstack_compute_node01_tenant_address 10.167.6.3 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools add-key parameters._param.openstack_compute_node02_tenant_address 10.167.6.31 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
@@ -45,8 +45,8 @@
 
 - description: Temporary workaround for removing cinder-volume from CTL nodes
   cmd: |
-    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
-    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: true
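
The generator templates now derive every reclass path from SHARED.CLUSTER_NAME (presumably defined in shared-salt.yaml) instead of mixing LAB_CONFIG_NAME with hard-coded cluster directories, so the add-key calls above no longer break when the lab name and the cluster directory diverge. The path construction, sketched with an example value:

    CLUSTER_NAME = 'cookied-bm-mcp-dvr-vxlan'  # example; supplied by the shared template

    def cluster_file(relpath, cluster=CLUSTER_NAME):
        return '/srv/salt/reclass/classes/cluster/{}/{}'.format(cluster, relpath)

    print(cluster_file('infra/config.yml'))
    print(cluster_file('openstack/control.yml'))
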
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
index f8714ff..4da1add 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
@@ -28,9 +28,9 @@
     set -e;
     . /root/venv-reclass-tools/bin/activate;
     # Remove rack01 key
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
@@ -39,8 +39,8 @@
 
 - description: Temporary workaround for removing cinder-volume from CTL nodes
   cmd: |
-    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
-    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: true
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml
index 9dd5f9a..d1693c1 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml
@@ -20,23 +20,23 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
new file mode 100644
index 0000000..bb17c15
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
@@ -0,0 +1,50 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+
+{% set LAB_CONFIG_NAME = 'virtual-mcp-ocata-dvr' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-ocata-dvr.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
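+    # With combined roles, these generated per-role node definitions are not deployed separately, so drop them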
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
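+    # reclass.cluster_meta_set writes a single name/value parameter into the given cluster model file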
+    salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    # Workaround for the missing reclass.system class for the dns role
+    salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
new file mode 100644
index 0000000..b711673
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
@@ -0,0 +1,44 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+
+{% set LAB_CONFIG_NAME = 'virtual-mcp-ocata-ovs' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-ocata-ovs.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
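+    # With combined roles, these generated per-role node definitions are not deployed separately, so drop them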
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
new file mode 100644
index 0000000..c823df7
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
@@ -0,0 +1,50 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+
+{% set LAB_CONFIG_NAME = 'virtual-mcp-pike-dvr' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-dvr.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
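+    # With combined roles, these generated per-role node definitions are not deployed separately, so drop them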
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
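+    # reclass.cluster_meta_set writes a single name/value parameter into the given cluster model file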
+    salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    # Workaround for the missing reclass.system class for the dns role
+    salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
new file mode 100644
index 0000000..47427e3
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
@@ -0,0 +1,44 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+
+{% set LAB_CONFIG_NAME = 'virtual-mcp-pike-ovs' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-ovs.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
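+    # With combined roles, these generated per-role node definitions are not deployed separately, so drop them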
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
index 1b5c90d..c871146 100644
--- a/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
@@ -59,6 +59,8 @@
    - salt-key -y -D
    - service salt-master restart
    - service salt-minion restart
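+   # Install all salt formulas and symlink each formula's reclass service class
+   # into /srv/salt/reclass/classes/service ('|| true' tolerates already-existing links)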
+   - apt-get install -y salt-formula-*
+   - for f in $(ls -1 /usr/share/salt-formulas/reclass/service); do ln -s /usr/share/salt-formulas/reclass/service/$f /srv/salt/reclass/classes/service/ || true; done
    - salt-call --timeout=180 test.ping
 
    ########################################################
diff --git a/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml b/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
index dcc2933..3afdf7f 100644
--- a/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
+++ b/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
@@ -89,7 +89,7 @@
    - chmod a+r /httpboot/coreos_production_pxe*
 
    - echo "Download ubuntu cloudinit image"
-   - wget https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img -O /httpboot/xenial-server-cloudimg-amd64.qcow2
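+   # The IMAGE_URL1604 env variable can override the default Ubuntu 16.04 cloud image URL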
+   - wget {{ os_env('IMAGE_URL1604', 'https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img') }} -O /httpboot/xenial-server-cloudimg-amd64.qcow2
 
    ########################################################
    # Node is ready, allow SSH access
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml
index 9d13e8d..8802687 100644
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml
+++ b/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml
@@ -198,7 +198,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml
index 37e3ad4..4927c4f 100644
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml
+++ b/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml
@@ -18,105 +18,105 @@
   skip_fail: false
 
 - description: Configure docker service
-  cmd: salt -C 'I@docker:swarm' state.sls docker.host
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install docker swarm on master node
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Send grains to the swarm slave nodes
-  cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Update mine
-  cmd: salt -C 'I@docker:swarm' mine.update
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Refresh modules
-  cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Rerun swarm on slaves to proper token population
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Configure slave nodes
-  cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  List registered Docker swarm nodes
-  cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 # Install slv2 infra
 - description: Install telegraf
-  cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
 - description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
   cmd: |
-    if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
-      salt -C 'I@prometheus:exporters' state.sls prometheus
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure collector
-  cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch server
-  cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana server
-  cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch client
-  cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana client
-  cmd: salt -C 'I@kibana:client' state.sls kibana.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Check influix db
   cmd: |
-    INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
     echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
     if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt -C 'I@influxdb:server' state.sls influxdb
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
@@ -125,44 +125,44 @@
 # Collect grains needed to configure the services
 
 - description: Get grains
-  cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Sync modules
-  cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Update mine
-  cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 5, delay: 15}
   skip_fail: false
 
 # Configure the services running in Docker Swarm
 - description: Install prometheus alertmanager
-  cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: run docker state
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: docker ps
-  cmd: salt -C 'I@docker:swarm' dockerng.ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure Grafana dashboards and datasources
-  cmd: sleep 30;  salt -C 'I@grafana:client' state.sls grafana.client
+  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/openstack.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/openstack.yaml
index 4d0ce7b..441b300 100644
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/openstack.yaml
+++ b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/openstack.yaml
@@ -159,7 +159,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/Readme.txt b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/Readme.txt
new file mode 100644
index 0000000..a3297a8
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/Readme.txt
@@ -0,0 +1 @@
+PoC templates. Do not use!
\ No newline at end of file
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/common-services.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/common-services.yaml
new file mode 100644
index 0000000..c782dfa
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/common-services.yaml
@@ -0,0 +1,16 @@
+{% from 'physical-mcp-ocata-offline-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+
+- description: Approve cfg01 ssh key for jenkins user
+  cmd: mkdir -p /var/lib/jenkins/.ssh && ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts && chown jenkins /var/lib/jenkins/.ssh/known_hosts
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+
+- description: Install jq for parsing JSON output
+  cmd: apt install -y jq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/openstack.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/openstack.yaml
new file mode 100644
index 0000000..745df96
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/openstack.yaml
@@ -0,0 +1,161 @@
+{% from 'physical-mcp-ocata-offline-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install OpenStack control services
+
+- description: Install glance on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@glance:server' state.sls glance -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
+- description: Restart apache due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Check apache status due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to exist)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glance:server' state.sls glusterfs.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:client' state.sls keystone.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check keystone service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check glance image-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install nova on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nova:controller' state.sls nova -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check nova service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install cinder
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:controller' state.sls cinder -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check cinder list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install neutron service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:server' state.sls neutron -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install neutron on gtw node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:gateway' state.sls neutron
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Check neutron agent-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@heat:server' state.sls heat -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Deploy horizon dashboard
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@horizon:server' state.sls horizon
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+# Install compute node
+
+- description: Apply formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Check IP on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+    'ip a'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 10, delay: 30}
+  skip_fail: false
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml
new file mode 100644
index 0000000..04185ea
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml
@@ -0,0 +1,26 @@
+{% from 'physical-mcp-ocata-offline-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+# {% from 'physical-mcp-ocata-offline-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
+# {% from 'physical-mcp-ocata-offline-ovs/underlay.yaml' import DOMAIN_NAME with context %}
+
+# Environment model name stored in https://github.com/Mirantis/tcp-qa/tree/master/tcp_tests/environments
+# {% set ENVIRONMENT_MODEL_NAME = os_env('ENVIRONMENT_MODEL_NAME','physical-mcp-ocata-offline-ovs') %}
+
+# {% import 'shared-salt.yaml' as SHARED with context %}
+
+- description: Wait until the salt-minion service is started
+  cmd: timeout 90s bash -c 'while ! systemctl is-active salt-minion; do sleep 10; echo salt-minion is not running yet; done'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Refresh pillars on master node
+  cmd: sleep 90; salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all salt resources on master node
+  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.sync_all && sleep 5
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--meta-data.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml
new file mode 100644
index 0000000..6978bd3
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml
@@ -0,0 +1,62 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Mount config drive
+   - mkdir /root/config-drive
+   - mount /dev/sr0 /root/config-drive
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   # Run user data script from config drive
+   - cd /root/config-drive && /bin/bash -xe ./user-data
+
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+
+   ########################################################
+   # Node is ready, allow SSH access
+   - echo "Allow SSH access ..."
+   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..6978bd3
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml
@@ -0,0 +1,62 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Mount config drive
+   - mkdir /root/config-drive
+   - mount /dev/sr0 /root/config-drive
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   # Run user data script from config drive
+   - cd /root/config-drive && /bin/bash -xe ./user-data
+
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+
+   ########################################################
+   # Node is ready, allow SSH access
+   - echo "Allow SSH access ..."
+   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml
new file mode 100644
index 0000000..aab7cde
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml
@@ -0,0 +1,79 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   #- sudo ifup eth0
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   - echo "Preparing base OS"
+   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list
+   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -
+
+   - apt-get clean
+   - eatmydata apt-get update && apt-get -y upgrade
+
+   # Install common packages
+   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   ########################################################
+   # Node is ready, allow SSH access
+   #- echo "Allow SSH access ..."
+   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   - apt-get install linux-generic-hwe-16.04 -y
+   - reboot
+   ########################################################
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          # The loopback network interface
+          auto lo
+          iface lo inet loopback
+
+          auto {interface_name}
+          iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
new file mode 100644
index 0000000..ca5c171
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
@@ -0,0 +1,378 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'physical-mcp-ocata-offline-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'physical-mcp-ocata-offline-ovs') %}
+#{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'offline-ocata-vxlan.local') %}
+{% set HOSTNAME_APT = os_env('HOSTNAME_APT', 'apt.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM04 = os_env('HOSTNAME_KVM04', 'kvm04.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM05 = os_env('HOSTNAME_KVM05', 'kvm05.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM06 = os_env('HOSTNAME_KVM06', 'kvm06.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW03 = os_env('HOSTNAME_GTW03', 'gtw03.' + DOMAIN_NAME) %}
+
+{% set HOSTNAME_CTL = os_env('HOSTNAME_CTL', 'ctl.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS = os_env('HOSTNAME_DBS', 'dbs.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS01 = os_env('HOSTNAME_DBS01', 'dbs01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS02 = os_env('HOSTNAME_DBS02', 'dbs02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS03 = os_env('HOSTNAME_DBS03', 'dbs03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG = os_env('HOSTNAME_MSG', 'msg.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG01 = os_env('HOSTNAME_MSG01', 'msg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG02 = os_env('HOSTNAME_MSG02', 'msg02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG03 = os_env('HOSTNAME_MSG03', 'msg03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON = os_env('HOSTNAME_MON', 'mon.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG = os_env('HOSTNAME_LOG', 'log.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR = os_env('HOSTNAME_MTR', 'mtr.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX03 = os_env('HOSTNAME_PRX03', 'prx03.' + DOMAIN_NAME) %}
+
+
+
+{% set ETH0_IP_ADDRESS_APT = os_env('ETH0_IP_ADDRESS_APT', '10.10.0.14') %}
+{% set ETH0_IP_ADDRESS_CFG01 = os_env('ETH0_IP_ADDRESS_CFG01', '10.10.0.15') %}
+{% set ETH0_IP_ADDRESS_PRX01 = os_env('ETH0_IP_ADDRESS_PRX01', '10.10.0.11') %}
+{% set ETH0_IP_ADDRESS_PRX02 = os_env('ETH0_IP_ADDRESS_PRX02', '10.10.0.12') %}
+{% set ETH0_IP_ADDRESS_PRX03 = os_env('ETH0_IP_ADDRESS_PRX03', '10.10.0.13') %}
+{% set ETH0_IP_ADDRESS_CTL = os_env('ETH0_IP_ADDRESS_CTL', '10.10.0.10') %}
+{% set ETH0_IP_ADDRESS_CTL01 = os_env('ETH0_IP_ADDRESS_CTL01', '10.10.0.11') %}
+{% set ETH0_IP_ADDRESS_CTL02 = os_env('ETH0_IP_ADDRESS_CTL02', '10.10.0.12') %}
+{% set ETH0_IP_ADDRESS_CTL03 = os_env('ETH0_IP_ADDRESS_CTL03', '10.10.0.13') %}
+{% set ETH0_IP_ADDRESS_MSG = os_env('ETH0_IP_ADDRESS_MSG', '10.10.0.40') %}
+{% set ETH0_IP_ADDRESS_MSG01 = os_env('ETH0_IP_ADDRESS_MSG01', '10.10.0.41') %}
+{% set ETH0_IP_ADDRESS_MSG02 = os_env('ETH0_IP_ADDRESS_MSG02', '10.10.0.42') %}
+{% set ETH0_IP_ADDRESS_MSG03 = os_env('ETH0_IP_ADDRESS_MSG03', '10.10.0.43') %}
+{% set ETH0_IP_ADDRESS_DBS = os_env('ETH0_IP_ADDRESS_DBS', '10.10.0.50') %}
+{% set ETH0_IP_ADDRESS_DBS01 = os_env('ETH0_IP_ADDRESS_DBS01', '10.10.0.51') %}
+{% set ETH0_IP_ADDRESS_DBS02 = os_env('ETH0_IP_ADDRESS_DBS02', '10.10.0.52') %}
+{% set ETH0_IP_ADDRESS_DBS03 = os_env('ETH0_IP_ADDRESS_DBS03', '10.10.0.53') %}
+{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '10.10.0.241') %}
+{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '10.10.0.242') %}
+{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '10.10.0.243') %}
+{% set ETH0_IP_ADDRESS_KVM04 = os_env('ETH0_IP_ADDRESS_KVM04', '10.10.0.244') %}
+{% set ETH0_IP_ADDRESS_KVM05 = os_env('ETH0_IP_ADDRESS_KVM05', '10.10.0.245') %}
+{% set ETH0_IP_ADDRESS_KVM06 = os_env('ETH0_IP_ADDRESS_KVM06', '10.10.0.246') %}
+{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '10.10.0.101') %}
+{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '10.10.0.102') %}
+{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '10.10.0.224') %}
+{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '10.10.0.225') %}
+{% set ETH0_IP_ADDRESS_GTW03 = os_env('ETH0_IP_ADDRESS_GTW03', '10.10.0.226') %}
+
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '10.11.0.15') %}
+{% set ETH1_IP_ADDRESS_PRX01 = os_env('ETH1_IP_ADDRESS_PRX01', '10.11.0.11') %}
+{% set ETH1_IP_ADDRESS_PRX02 = os_env('ETH1_IP_ADDRESS_PRX02', '10.11.0.12') %}
+{% set ETH1_IP_ADDRESS_PRX03 = os_env('ETH1_IP_ADDRESS_PRX03', '10.11.0.13') %}
+{% set ETH1_IP_ADDRESS_CTL = os_env('ETH1_IP_ADDRESS_CTL', '10.11.0.10') %}
+{% set ETH1_IP_ADDRESS_CTL01 = os_env('ETH1_IP_ADDRESS_CTL01', '10.11.0.11') %}
+{% set ETH1_IP_ADDRESS_CTL02 = os_env('ETH1_IP_ADDRESS_CTL02', '10.11.0.12') %}
+{% set ETH1_IP_ADDRESS_CTL03 = os_env('ETH1_IP_ADDRESS_CTL03', '10.11.0.13') %}
+{% set ETH1_IP_ADDRESS_MSG = os_env('ETH1_IP_ADDRESS_MSG', '10.11.0.40') %}
+{% set ETH1_IP_ADDRESS_MSG01 = os_env('ETH1_IP_ADDRESS_MSG01', '10.11.0.41') %}
+{% set ETH1_IP_ADDRESS_MSG02 = os_env('ETH1_IP_ADDRESS_MSG02', '10.11.0.42') %}
+{% set ETH1_IP_ADDRESS_MSG03 = os_env('ETH1_IP_ADDRESS_MSG03', '10.11.0.43') %}
+{% set ETH1_IP_ADDRESS_DBS = os_env('ETH1_IP_ADDRESS_DBS', '10.11.0.50') %}
+{% set ETH1_IP_ADDRESS_DBS01 = os_env('ETH1_IP_ADDRESS_DBS01', '10.11.0.51') %}
+{% set ETH1_IP_ADDRESS_DBS02 = os_env('ETH1_IP_ADDRESS_DBS02', '10.11.0.52') %}
+{% set ETH1_IP_ADDRESS_DBS03 = os_env('ETH1_IP_ADDRESS_DBS03', '10.11.0.53') %}
+{% set ETH1_IP_ADDRESS_KVM01 = os_env('ETH1_IP_ADDRESS_KVM01', '10.11.0.241') %}
+{% set ETH1_IP_ADDRESS_KVM02 = os_env('ETH1_IP_ADDRESS_KVM02', '10.11.0.242') %}
+{% set ETH1_IP_ADDRESS_KVM03 = os_env('ETH1_IP_ADDRESS_KVM03', '10.11.0.243') %}
+{% set ETH1_IP_ADDRESS_KVM04 = os_env('ETH1_IP_ADDRESS_KVM04', '10.11.0.244') %}
+{% set ETH1_IP_ADDRESS_KVM05 = os_env('ETH1_IP_ADDRESS_KVM05', '10.11.0.245') %}
+{% set ETH1_IP_ADDRESS_KVM06 = os_env('ETH1_IP_ADDRESS_KVM06', '10.11.0.246') %}
+{% set ETH1_IP_ADDRESS_CMP001 = os_env('ETH1_IP_ADDRESS_CMP001', '10.11.0.101') %}
+{% set ETH1_IP_ADDRESS_CMP002 = os_env('ETH1_IP_ADDRESS_CMP002', '10.11.0.102') %}
+{% set ETH1_IP_ADDRESS_GTW01 = os_env('ETH1_IP_ADDRESS_GTW01', '10.11.0.224') %}
+{% set ETH1_IP_ADDRESS_GTW02 = os_env('ETH1_IP_ADDRESS_GTW02', '10.11.0.225') %}
+{% set ETH1_IP_ADDRESS_GTW03 = os_env('ETH1_IP_ADDRESS_GTW03', '10.11.0.226') %}
+
+{% set ETH2_IP_ADDRESS_CFG01 = os_env('ETH2_IP_ADDRESS_CFG01', '172.16.44.33') %}
+
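+{# The *_PREFIX variables keep the first three octets of each CFG01 address
+   (e.g. '172.16.44.33' -> '172.16.44'), so related addresses can be derived
+   from them later in this template. #}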
+{% set ETH0_IP_ADDRESS_CFG01_PREFIX = '.'.join(ETH0_IP_ADDRESS_CFG01.split('.')[0:3]) %}
+{% set ETH1_IP_ADDRESS_CFG01_PREFIX = '.'.join(ETH1_IP_ADDRESS_CFG01.split('.')[0:3]) %}
+{% set ETH2_IP_ADDRESS_CFG01_PREFIX = '.'.join(ETH2_IP_ADDRESS_CFG01.split('.')[0:3]) %}
+
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
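+ # Anchors (&name) defined above are reused below via *name references,
+ # so these values are declared once for all nodes.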
+
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'phy-mcp-ocata-offline-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      management-pool01:
+        net: {{ os_env('MGMT_ADDRESS_POOL01', '10.11.0.0/24:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
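+            # '+1' values are offsets from the network address of this pool,
+            # resolved by the devops framework when the pool is created.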
+            default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+            default_{{ HOSTNAME_KVM01 }}: {{ ETH1_IP_ADDRESS_KVM01 }}
+            default_{{ HOSTNAME_KVM02 }}: {{ ETH1_IP_ADDRESS_KVM02 }}
+            default_{{ HOSTNAME_KVM03 }}: {{ ETH1_IP_ADDRESS_KVM03 }}
+            default_{{ HOSTNAME_KVM04 }}: {{ ETH1_IP_ADDRESS_KVM04 }}
+            default_{{ HOSTNAME_KVM05 }}: {{ ETH1_IP_ADDRESS_KVM05 }}
+            default_{{ HOSTNAME_KVM06 }}: {{ ETH1_IP_ADDRESS_KVM06 }}
+            default_{{ HOSTNAME_CMP001 }}: {{ ETH1_IP_ADDRESS_CMP001 }}
+            default_{{ HOSTNAME_CMP002 }}: {{ ETH1_IP_ADDRESS_CMP002 }}
+            default_{{ HOSTNAME_GTW01 }}: {{ ETH1_IP_ADDRESS_GTW01 }}
+            default_{{ HOSTNAME_GTW02 }}: {{ ETH1_IP_ADDRESS_GTW02 }}
+            default_{{ HOSTNAME_CTL }}: {{ ETH1_IP_ADDRESS_CTL }}
+            default_{{ HOSTNAME_CTL01 }}: {{ ETH1_IP_ADDRESS_CTL01 }}
+            default_{{ HOSTNAME_CTL02 }}: {{ ETH1_IP_ADDRESS_CTL02 }}
+            default_{{ HOSTNAME_CTL03 }}: {{ ETH1_IP_ADDRESS_CTL03 }}
+            default_{{ HOSTNAME_MSG }}: {{ ETH1_IP_ADDRESS_MSG }}
+            default_{{ HOSTNAME_MSG01 }}: {{ ETH1_IP_ADDRESS_MSG01 }}
+            default_{{ HOSTNAME_MSG02 }}: {{ ETH1_IP_ADDRESS_MSG02 }}
+            default_{{ HOSTNAME_MSG03 }}: {{ ETH1_IP_ADDRESS_MSG03 }}
+            default_{{ HOSTNAME_MON }}: {{ ETH1_IP_ADDRESS_MON }}
+            default_{{ HOSTNAME_MON01 }}: {{ ETH1_IP_ADDRESS_MON01 }}
+            default_{{ HOSTNAME_MON02 }}: {{ ETH1_IP_ADDRESS_MON02 }}
+            default_{{ HOSTNAME_MON03 }}: {{ ETH1_IP_ADDRESS_MON03 }}
+            default_{{ HOSTNAME_DBS }}: {{ ETH1_IP_ADDRESS_DBS }}
+            default_{{ HOSTNAME_DBS01 }}: {{ ETH1_IP_ADDRESS_DBS01 }}
+            default_{{ HOSTNAME_DBS02 }}: {{ ETH1_IP_ADDRESS_DBS02 }}
+            default_{{ HOSTNAME_DBS03 }}: {{ ETH1_IP_ADDRESS_DBS03 }}
+            default_{{ HOSTNAME_LOG }}: {{ ETH1_IP_ADDRESS_LOG }}
+            default_{{ HOSTNAME_LOG01 }}: {{ ETH1_IP_ADDRESS_LOG01 }}
+            default_{{ HOSTNAME_LOG02 }}: {{ ETH1_IP_ADDRESS_LOG02 }}
+            default_{{ HOSTNAME_LOG03 }}: {{ ETH1_IP_ADDRESS_LOG03 }}
+            default_{{ HOSTNAME_MTR }}: {{ ETH1_IP_ADDRESS_MTR }}
+            default_{{ HOSTNAME_MTR01 }}: {{ ETH1_IP_ADDRESS_MTR01 }}
+            default_{{ HOSTNAME_MTR02 }}: {{ ETH1_IP_ADDRESS_MTR02 }}
+            default_{{ HOSTNAME_MTR03 }}: {{ ETH1_IP_ADDRESS_MTR03 }}
+
+      admin-pool01:
+        net: {{ os_env('DEPLOY_ADDRESS_POOL01', '10.10.0.0/24:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_APT }}: {{ ETH0_IP_ADDRESS_APT }}
+            default_{{ HOSTNAME_CFG01 }}: {{ ETH0_IP_ADDRESS_CFG01 }}
+            default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+            default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+            default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+            default_{{ HOSTNAME_KVM04 }}: {{ ETH0_IP_ADDRESS_KVM04 }}
+            default_{{ HOSTNAME_KVM05 }}: {{ ETH0_IP_ADDRESS_KVM05 }}
+            default_{{ HOSTNAME_KVM06 }}: {{ ETH0_IP_ADDRESS_KVM06 }}
+            default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+            default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+            default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
+            default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+            default_{{ HOSTNAME_CTL }}: {{ ETH0_IP_ADDRESS_CTL }}
+            default_{{ HOSTNAME_CTL01 }}: {{ ETH0_IP_ADDRESS_CTL01 }}
+            default_{{ HOSTNAME_CTL02 }}: {{ ETH0_IP_ADDRESS_CTL02 }}
+            default_{{ HOSTNAME_CTL03 }}: {{ ETH0_IP_ADDRESS_CTL03 }}
+            default_{{ HOSTNAME_MSG }}: {{ ETH0_IP_ADDRESS_MSG }}
+            default_{{ HOSTNAME_MSG01 }}: {{ ETH0_IP_ADDRESS_MSG01 }}
+            default_{{ HOSTNAME_MSG02 }}: {{ ETH0_IP_ADDRESS_MSG02 }}
+            default_{{ HOSTNAME_MSG03 }}: {{ ETH0_IP_ADDRESS_MSG03 }}
+            default_{{ HOSTNAME_MON }}: {{ ETH0_IP_ADDRESS_MON }}
+            default_{{ HOSTNAME_MON01 }}: {{ ETH0_IP_ADDRESS_MON01 }}
+            default_{{ HOSTNAME_MON02 }}: {{ ETH0_IP_ADDRESS_MON02 }}
+            default_{{ HOSTNAME_MON03 }}: {{ ETH0_IP_ADDRESS_MON03 }}
+            default_{{ HOSTNAME_DBS }}: {{ ETH0_IP_ADDRESS_DBS }}
+            default_{{ HOSTNAME_DBS01 }}: {{ ETH0_IP_ADDRESS_DBS01 }}
+            default_{{ HOSTNAME_DBS02 }}: {{ ETH0_IP_ADDRESS_DBS02 }}
+            default_{{ HOSTNAME_DBS03 }}: {{ ETH0_IP_ADDRESS_DBS03 }}
+            default_{{ HOSTNAME_LOG }}: {{ ETH0_IP_ADDRESS_LOG }}
+            default_{{ HOSTNAME_LOG01 }}: {{ ETH0_IP_ADDRESS_LOG01 }}
+            default_{{ HOSTNAME_LOG02 }}: {{ ETH0_IP_ADDRESS_LOG02 }}
+            default_{{ HOSTNAME_LOG03 }}: {{ ETH0_IP_ADDRESS_LOG03 }}
+            default_{{ HOSTNAME_MTR }}: {{ ETH0_IP_ADDRESS_MTR }}
+            default_{{ HOSTNAME_MTR01 }}: {{ ETH0_IP_ADDRESS_MTR01 }}
+            default_{{ HOSTNAME_MTR02 }}: {{ ETH0_IP_ADDRESS_MTR02 }}
+            default_{{ HOSTNAME_MTR03 }}: {{ ETH0_IP_ADDRESS_MTR03 }}
+
+
+      public-pool01:
+        net: {{ os_env('PUBLIC_ADDRESS_POOL01', '172.16.44.0/22:22') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: {{ ETH2_IP_ADDRESS_CFG01 }}
+
+
+    groups:
+
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: false
+            hpet: false
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+        network_pools:
+          admin: admin-pool01
+          management: management-pool01
+          public: public-pool01
+
+        l2_network_devices:
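+          # Each device below bridges onto an existing host bridge whose name
+          # is taken from an env var; DHCP is disabled because addresses are
+          # expected to be assigned from the pools above.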
+          # Ironic management interface
+          management:
+            address_pool: management-pool01
+            dhcp: false
+            forward:
+              mode: bridge
+            parent_iface:
+              phys_dev: !os_env ADMIN_BRIDGE
+
+          admin: # deploy
+            address_pool: admin-pool01
+            dhcp: false
+            forward:
+              mode: bridge
+            parent_iface:
+              phys_dev: !os_env DEPLOY_BRIDGE
+
+          public:
+            address_pool: public-pool01
+            dhcp: false
+            forward:
+              mode: bridge
+            parent_iface:
+              phys_dev: !os_env PUBLIC_BRIDGE
+
+          #admin:
+          #  address_pool: admin-pool01
+          #  dhcp: true
+
+        #group_volumes:
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  shared_backing_store_name: !os_env CFG01_VOLUME_NAME
+                  format: qcow2
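+                  # Reuses an existing qcow2 volume on the host (named by
+                  # CFG01_VOLUME_NAME) as the backing store for the system disk.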
+                - name: config
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  source_image: !os_env CFG01_CONFIG_PATH
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin # deploy
+                  interface_model: *interface_model
+
+                - label: ens4
+                  l2_network_device: management
+                  interface_model: *interface_model
+
+                - label: ens5
+                  l2_network_device: public
+                  interface_model: *interface_model
+
+                #- label: ens6
+                #  l2_network_device: admin
+                #  interface_model: *interface_model
+
+
+              network_config:
+                ens3:
+                  networks:
+                    - admin # deploy
+                ens4:
+                  networks:
+                    - management
+                ens5:
+                  networks:
+                    - public
+                #ens6:
+                #  networks:
+                #    - admin
+
+          - name: {{ HOSTNAME_APT }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 550
+                  shared_backing_store_name: !os_env APT_VOLUME_NAME
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  # capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  source_image: !os_env APT_CONFIG_PATH
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin # deploy
+                  interface_model: *interface_model
+                #- label: ens4
+                #  l2_network_device: admin
+                #  interface_model: *interface_model
+
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                #ens4:
+                #  networks:
+                #    - admin
+
diff --git a/tcp_tests/templates/physical_mcp11_dvr/openstack.yaml b/tcp_tests/templates/physical_mcp11_dvr/openstack.yaml
index 29d972c..e01ee0f 100644
--- a/tcp_tests/templates/physical_mcp11_dvr/openstack.yaml
+++ b/tcp_tests/templates/physical_mcp11_dvr/openstack.yaml
@@ -159,7 +159,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/openstack.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/openstack.yaml
index 4407d40..83674b2 100644
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/openstack.yaml
+++ b/tcp_tests/templates/physical_mcp11_ovs_dpdk/openstack.yaml
@@ -151,7 +151,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 22acb9a..e34e519 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -13,13 +13,13 @@
 {% set ENVIRONMENT_TEMPLATE_REF_CHANGE = os_env('ENVIRONMENT_TEMPLATE_REF_CHANGE','') %}
 
 {% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', 'deb [arch=amd64] http://apt.mirantis.com/xenial ' + REPOSITORY_SUITE + ' salt') %}
+{% set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', 'deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME} ' + REPOSITORY_SUITE + ' salt extra') %}
 {% set FORMULA_GPG = os_env('FORMULA_GPG', 'http://apt.mirantis.com/public.gpg') %}
-{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb http://repo.saltstack.local.test/apt/ubuntu/16.04/amd64/2016.3 xenial main") %}
-{% set SALT_GPG = os_env('SALT_GPG', 'http://repo.saltstack.local.test/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub') %}
-{% set UBUNTU_REPOSITORY = os_env('UBUNTU_REPOSITORY', "deb http://archive.ubuntu.com/ubuntu/ xenial main universe restricted") %}
-{% set UBUNTU_UPDATES_REPOSITORY = os_env('UBUNTU_UPDATES_REPOSITORY', "deb http://archive.ubuntu.com/ubuntu/ xenial-updates main universe restricted") %}
-{% set UBUNTU_SECURITY_REPOSITORY = os_env('UBUNTU_SECURITY_REPOSITORY', "deb http://archive.ubuntu.com/ubuntu/ xenial-security main universe restricted") %}
+{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/2016.3 " + REPOSITORY_SUITE + " main") %}
+{% set SALT_GPG = os_env('SALT_GPG', 'http://apt.mirantis.com/public.gpg') %}
+{% set UBUNTU_REPOSITORY = os_env('UBUNTU_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME} main restricted universe") %}
+{% set UBUNTU_UPDATES_REPOSITORY = os_env('UBUNTU_UPDATES_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-updates main restricted universe") %}
+{% set UBUNTU_SECURITY_REPOSITORY = os_env('UBUNTU_SECURITY_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-security main restricted universe") %}
 
 {# Address pools for reclass cluster model are taken in the following order:
  # 1. environment variables,
@@ -54,6 +54,7 @@
   cmd: |
     rm -rf trusted* ;
     rm -rf /etc/apt/sources.list ;
+    . /etc/lsb-release;  # Get DISTRIB_CODENAME variable
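+    # The ${DISTRIB_CODENAME} placeholders inside the repo strings are
+    # expanded by the shell in the echo commands below, now that
+    # lsb-release has been sourced.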
     echo "{{ FORMULA_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_salt.list;
     wget -O - "{{ FORMULA_GPG }}" | apt-key add -;
     echo "{{ SALT_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_saltstack.list;
@@ -72,7 +73,26 @@
 {%- macro MACRO_INSTALL_SALT_MASTER() %}
 {######################################}
 - description: Installing salt master on cfg01
-  cmd:  eatmydata apt-get install -y --allow-unauthenticated reclass git salt-master
+  cmd: |
+    which wget >/dev/null || (apt-get update; apt-get install -y wget);
+    # Configure ubuntu and salt repositories
+    . /etc/lsb-release;  # Get DISTRIB_CODENAME variable
+    echo "{{ UBUNTU_REPOSITORY }}" > /etc/apt/sources.list
+    echo "{{ UBUNTU_UPDATES_REPOSITORY }}" >> /etc/apt/sources.list
+    echo "{{ UBUNTU_SECURITY_REPOSITORY }}" >> /etc/apt/sources.list
+
+    echo "{{ FORMULA_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_salt.list;
+    wget -O - {{ FORMULA_GPG }} | apt-key add -;
+    echo "{{ SALT_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_saltstack.list;
+    wget -O - {{ SALT_GPG }} | apt-key add -;
+
+    apt-get clean
+    apt-get update
+
+    # Install salt-master and reclass
+    eatmydata apt-get install -y --allow-unauthenticated reclass salt-master
+    # Install common packages
+    eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 1}
   skip_fail: false
@@ -417,6 +437,7 @@
     set -e;
     FORMULA_PATH=${FORMULA_PATH:-/usr/share/salt-formulas};
     which wget > /dev/null || (apt-get update; apt-get install -y wget);
+    . /etc/lsb-release;  # Get DISTRIB_CODENAME variable
     echo "{{ FORMULA_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_salt.list;
     wget -O - "{{ FORMULA_GPG }}" | apt-key add -;
     apt-get clean; apt-get update;
@@ -482,8 +503,28 @@
     id: {{ ssh['node_name'] }}
     master: {{ config.salt.salt_master_host }}
     EOF
+
+    # Configure ubuntu and salt repositories
+    which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+    . /etc/lsb-release;  # Get DISTRIB_CODENAME variable
+    echo "{{ UBUNTU_REPOSITORY }}" > /etc/apt/sources.list
+    echo "{{ UBUNTU_UPDATES_REPOSITORY }}" >> /etc/apt/sources.list
+    echo "{{ UBUNTU_SECURITY_REPOSITORY }}" >> /etc/apt/sources.list
+
+    echo "{{ SALT_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_saltstack.list;
+    wget -O - {{ SALT_GPG }} | apt-key add -;
+
+    apt-get clean
+    apt-get update
+
+    # Install salt-minion
     eatmydata apt-get install -y salt-minion;
-    service salt-minion restart;  # For case if salt-minion was already installed
+    # Install common packages
+    eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+    # Restart salt-minion if it was already installed
+    service salt-minion restart
   node_name: {{ ssh['node_name'] }}
   retry: {count: 1, delay: 1}
   skip_fail: false
@@ -723,8 +764,9 @@
 
 {%- endmacro %}
 
+
 {%- macro ADJUST_K8S_OPTS() %}
-{#########################################}
+{############################}
 
 - description: Set k8s deploy parameters
   cmd: |
@@ -739,8 +781,31 @@
 
 {%- endmacro %}
 
+
+{%- macro ADJUST_SL_OPTS(OVERRIDES_FILENAME='') %}
+{#############################################}
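+{#- Writes every option from config.sl_deploy into the reclass cluster model
+    via the reclass.cluster_meta_set module, then refreshes pillars on all
+    minions so the new values take effect. #}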
+- description: Set SL docker images deploy parameters
+  cmd: |
+  {#- For a cookiecutter-generated model, use overrides.yml from the environment model instead of the cluster model #}
+  {%- for sl_opt, value in config.sl_deploy.items() %}
+    {%- if value|string() %}
+      {%- if OVERRIDES_FILENAME %}
+    salt-call reclass.cluster_meta_set name={{ sl_opt }} value={{ value }} file_name={{ OVERRIDES_FILENAME }};
+      {%- else %}
+    salt-call reclass.cluster_meta_set name={{ sl_opt }} value={{ value }};
+      {%- endif %}
+    {%- endif %}
+  {%- endfor %}
+    salt '*' saltutil.refresh_pillar;
+    sleep 10
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+{%- endmacro %}
+
+
 {%- macro REGISTER_COMPUTE_NODES() %}
-{#########################################}
+{###################################}
 
 {% for ssh in config.underlay.ssh %}
 {% if ssh["node_name"].startswith("cmp") %}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/ceph.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/ceph.yaml
index ad3b628..8705250 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/ceph.yaml
@@ -39,8 +39,8 @@
 
 - description: Install ceph mgr if defined (needed only for Luminous)
   cmd: |
-    if salt -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
-      salt -C 'I@ceph:mgr' state.sls ceph.mgr
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' state.sls ceph.mgr
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
@@ -97,10 +97,10 @@
 
 - description: Install radosgw if exists
   cmd: |
-    if salt -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
-      salt -C 'I@ceph:radosgw' saltutil.sync_grains;
-      salt -C 'I@ceph:radosgw' state.sls ceph.radosgw;
-      salt -C 'I@keystone:client' state.sls keystone.client;
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' saltutil.sync_grains;
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' state.sls ceph.radosgw;
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keystone:client' state.sls keystone.client;
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
@@ -115,34 +115,34 @@
 
 - description: Connect ceph to glance
   cmd: |
-    salt -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
-    salt -C 'I@ceph:common and I@glance:server' service.restart glance-api;
-    salt -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
-    salt -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-api;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
 
 - description: Connect ceph to cinder and nova
   cmd: |
-    salt -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
-    salt -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
-    salt -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
-    salt -C 'I@ceph:common and I@nova:compute' state.sls nova;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls nova;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
 
 - description: Restart cinder volume
   cmd: |
-    salt -C 'I@cinder:controller' service.restart cinder-volume;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@cinder:controller' service.restart cinder-volume;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
 
 - description: Restart nova-compute
   cmd: |
-    salt -C 'I@nova:compute' service.restart nova-compute;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nova:compute' service.restart nova-compute;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml
index 80c2099..1783d6d 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml
@@ -164,7 +164,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
new file mode 100644
index 0000000..25422ec
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
@@ -0,0 +1,154 @@
+default_context:
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_enabled: 'False'
+  cluster_domain: virtual-mcp-ocata-dvr.local
+  cluster_name: virtual-mcp-ocata-dvr
+  compute_bond_mode: active-backup
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 172.16.10.0/24
+  control_vlan: '10'
+  cookiecutter_template_branch: master
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  deploy_network_gateway: 192.168.10.1
+  deploy_network_netmask: 255.255.255.0
+  deploy_network_subnet: 192.168.10.0/24
+  deployment_type: physical
+  dns_server01: 8.8.8.8
+  dns_server02: 8.8.4.4
+  email_address: ddmitriev@mirantis.com
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 172.16.10.101
+  infra_kvm01_deploy_address: 192.168.10.101
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 172.16.10.102
+  infra_kvm02_deploy_address: 192.168.10.102
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 172.16.10.103
+  infra_kvm03_deploy_address: 192.168.10.103
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 172.16.10.100
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 192.168.10.90
+  maas_hostname: cfg01
+  mcp_version: stable
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openstack_benchmark_node01_address: 172.16.10.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '100'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 172.16.10
+  openstack_compute_rack01_tenant_subnet: 10.1.0
+  openstack_control_address: 172.16.10.100
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 172.16.10.101
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 172.16.10.102
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 172.16.10.103
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 172.16.10.100
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: 172.16.10.101
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: 172.16.10.102
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: 172.16.10.103
+  openstack_database_node03_hostname: dbs03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_address: 172.16.10.110
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node01_tenant_address: 10.1.0.6
+  openstack_gateway_node02_address: 172.16.10.111
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node02_tenant_address: 10.1.0.7
+  openstack_gateway_node03_address: 172.16.10.112
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node03_tenant_address: 10.1.0.8
+  openstack_message_queue_address: 172.16.10.100
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: 172.16.10.101
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: 172.16.10.102
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: 172.16.10.103
+  openstack_message_queue_node03_hostname: msg03
+  openstack_network_engine: ovs
+  openstack_neutron_qos: 'False'
+  openstack_neutron_vlan_aware_vms: 'False'
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_ovs_dvr_enabled: 'True'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 172.16.10.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 172.16.10.121
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 172.16.10.122
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 172.16.10.19
+  openstack_version: ocata
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  oss_notification_app_id: '24'
+  oss_notification_sender_password: password
+  oss_notification_smtp_port: '587'
+  oss_notification_webhook_login_id: '13'
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+  salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+  salt_master_address: 172.16.10.90
+  salt_master_hostname: cfg01
+  salt_master_management_address: 192.168.10.90
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  stacklight_enabled: 'True'
+  stacklight_log_address: 172.16.10.70
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 172.16.10.107
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 172.16.10.108
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 172.16.10.109
+  stacklight_log_node03_hostname: log03
+  stacklight_monitor_address: 172.16.10.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 172.16.10.107
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 172.16.10.108
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 172.16.10.109
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_notification_address: alerts@localhost
+  stacklight_notification_smtp_host: 127.0.0.1
+  stacklight_telemetry_address: 172.16.10.70
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 172.16.10.107
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 172.16.10.108
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 172.16.10.109
+  stacklight_telemetry_node03_hostname: mtr03
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.1.0.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.1.0.0/24
+  tenant_vlan: '20'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'False'
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
new file mode 100644
index 0000000..9a04b68
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
@@ -0,0 +1,165 @@
+nodes:
+    cfg01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    ctl01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - infra_kvm
+      - openstack_control_leader
+      - openstack_database_leader
+      - openstack_message_queue
+      - features_designate_pool_manager_database
+      - features_designate_pool_manager
+      - features_designate_pool_manager_keystone
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    ctl02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - infra_kvm
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - features_designate_pool_manager_database
+      - features_designate_pool_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    ctl03.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - infra_kvm
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - features_designate_pool_manager_database
+      - features_designate_pool_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    prx01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    mon01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_server_node01
+      roles:
+      - stacklightv2_server_leader
+      - stacklight_telemetry_leader
+      - stacklight_log_leader_v2
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    mon02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - stacklight_telemetry
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    mon03.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - stacklight_telemetry
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    # Generator-based computes. For compatibility only
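+    # '<<count>>' is a placeholder expanded per compute node by the
+    # environment model generator (presumably producing cmp001, cmp002, ...).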
+    cmp<<count>>.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_compute_rack01
+      roles:
+      - openstack_compute
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: bond0_ab_ovs_vxlan_ctl_mesh
+        ens5:
+          role: bond0_ab_ovs_vxlan_ctl_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
+
+    gtw01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: bond0_ab_ovs_vxlan_ctl_mesh
+        ens5:
+          role: bond0_ab_ovs_vxlan_ctl_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
+
+    dns01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_dns_node01
+      roles:
+      - features_designate_pool_manager_dns
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+          single_address: ${_param:openstack_dns_node01_address}
+
+    dns02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_dns_node02
+      roles:
+      - features_designate_pool_manager_dns
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+          single_address: ${_param:openstack_dns_node02_address}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
index 35a11a0..ed0ee59 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
@@ -200,7 +200,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
index 689dcf4..c8204a8 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
@@ -17,6 +17,8 @@
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
 - description: "Workaround for PROD-14831 , add 'dns' role to cmp01 and cmp02 nodes"
   cmd: |
     set -e;
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
index c5c710a..5795a25 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
@@ -18,105 +18,105 @@
   skip_fail: false
 
 - description: Configure docker service
-  cmd: salt -C 'I@docker:swarm' state.sls docker.host
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install docker swarm on master node
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Send grains to the swarm slave nodes
-  cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Update mine
-  cmd: salt -C 'I@docker:swarm' mine.update
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Refresh modules
-  cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Rerun swarm on slaves to proper token population
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Configure slave nodes
-  cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  List registered Docker swarm nodes
-  cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 # Install slv2 infra
 - description: Install telegraf
-  cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
 - description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
   cmd: |
-    if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
-      salt -C 'I@prometheus:exporters' state.sls prometheus
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure collector
-  cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch server
-  cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana server
-  cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch client
-  cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana client
-  cmd: salt -C 'I@kibana:client' state.sls kibana.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Check influxdb
   cmd: |
-    INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
     echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
     if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt -C 'I@influxdb:server' state.sls influxdb
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
@@ -125,44 +125,44 @@
 # Collect grains needed to configure the services
 
 - description: Get grains
-  cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Sync modules
-  cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Update mine
-  cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 5, delay: 15}
   skip_fail: false
 
 # Configure the services running in Docker Swarm
 - description: Install prometheus alertmanager
-  cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: run docker state
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: docker ps
-  cmd: salt -C 'I@docker:swarm' dockerng.ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure Grafana dashboards and datasources
-  cmd: sleep 30;  salt -C 'I@grafana:client' state.sls grafana.client
+  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
index 6448211..a73ca23 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -44,37 +42,18 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
 
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   # Enable grub menu using updated config below
+   - update-grub
 
   write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
    - path: /etc/network/interfaces
      content: |
           auto ens3
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml
index c056295..3fbb777 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -44,34 +42,6 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
 
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml
index 17450c6..25ec2f0 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml
@@ -39,8 +39,8 @@
 
 - description: Install ceph mgr if defined (needed only for Luminous)
   cmd: |
-    if salt -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
-      salt -C 'I@ceph:mgr' state.sls ceph.mgr
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' state.sls ceph.mgr
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
@@ -97,10 +97,10 @@
 
 - description: Install radosgw if exists
   cmd: |
-    if salt -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
-      salt -C 'I@ceph:radosgw' saltutil.sync_grains;
-      salt -C 'I@ceph:radosgw' state.sls ceph.radosgw;
-      salt -C 'I@keystone:client' state.sls keystone.client;
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' saltutil.sync_grains;
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' state.sls ceph.radosgw;
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keystone:client' state.sls keystone.client;
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
@@ -115,34 +115,34 @@
 
 - description: Connect ceph to glance
   cmd: |
-    salt -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
-    salt -C 'I@ceph:common and I@glance:server' service.restart glance-api;
-    salt -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
-    salt -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-api;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
 
 - description: Connect ceph to cinder and nova
   cmd: |
-    salt -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
-    salt -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
-    salt -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
-    salt -C 'I@ceph:common and I@nova:compute' state.sls nova;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls nova;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
 
 - description: Restart cinder volume
   cmd: |
-    salt -C 'I@cinder:controller' service.restart cinder-volume;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@cinder:controller' service.restart cinder-volume;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
 
 - description: Restart nova-compute
   cmd: |
-    salt -C 'I@nova:compute' service.restart nova-compute;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nova:compute' service.restart nova-compute;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
index 4f37ddd..f35c749 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
@@ -164,7 +164,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
index 6448211..da7908d 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -44,35 +42,7 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data1604.yaml
index c056295..3fbb777 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -44,34 +42,6 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
 
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
new file mode 100644
index 0000000..d2aded3
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
@@ -0,0 +1,154 @@
+default_context:
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_enabled: 'False'
+  cluster_domain: virtual-mcp-ocata-ovs.local
+  cluster_name: virtual-mcp-ocata-ovs
+  compute_bond_mode: active-backup
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: U1hx5V31VJfFFBu8fCsk9ebDN2TwuBABTIcptYQ8tmFSlhSxHIkKnJnDsnckgKnH
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 172.16.10.0/24
+  control_vlan: '10'
+  cookiecutter_template_branch: master
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  deploy_network_gateway: 192.168.10.1
+  deploy_network_netmask: 255.255.255.0
+  deploy_network_subnet: 192.168.10.0/24
+  deployment_type: physical
+  dns_server01: 8.8.8.8
+  dns_server02: 8.8.4.4
+  email_address: ddmitriev@mirantis.com
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 172.16.10.101
+  infra_kvm01_deploy_address: 192.168.10.101
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 172.16.10.102
+  infra_kvm02_deploy_address: 192.168.10.102
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 172.16.10.103
+  infra_kvm03_deploy_address: 192.168.10.103
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 172.16.10.100
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 192.168.10.90
+  maas_hostname: cfg01
+  mcp_version: stable
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openstack_benchmark_node01_address: 172.16.10.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '100'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 172.16.10
+  openstack_compute_rack01_tenant_subnet: 10.1.0
+  openstack_control_address: 172.16.10.100
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 172.16.10.101
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 172.16.10.102
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 172.16.10.103
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 172.16.10.100
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: 172.16.10.101
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: 172.16.10.102
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: 172.16.10.103
+  openstack_database_node03_hostname: dbs03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_address: 172.16.10.110
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node01_tenant_address: 10.1.0.6
+  openstack_gateway_node02_address: 172.16.10.111
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node02_tenant_address: 10.1.0.7
+  openstack_gateway_node03_address: 172.16.10.112
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node03_tenant_address: 10.1.0.8
+  openstack_message_queue_address: 172.16.10.100
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: 172.16.10.101
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: 172.16.10.102
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: 172.16.10.103
+  openstack_message_queue_node03_hostname: msg03
+  openstack_network_engine: ovs
+  openstack_neutron_qos: 'False'
+  openstack_neutron_vlan_aware_vms: 'False'
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_ovs_dvr_enabled: 'False'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 172.16.10.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 172.16.10.121
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 172.16.10.122
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 172.16.10.19
+  openstack_version: ocata
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  oss_notification_app_id: '24'
+  oss_notification_sender_password: password
+  oss_notification_smtp_port: '587'
+  oss_notification_webhook_login_id: '13'
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: PGah7Ph3IdWuMdAX3ZBLSf5BtlBG1Qhl
+  salt_api_password_hash: $6$kgvztcjH$9B2950AyxRjE2Tj5QNVCnvdrgaFo/u6c59pMoQPqfxs2MTLLU7ywxPTQnDH3cNV.BBEK6FilF9SulWfIfENou0
+  salt_master_address: 172.16.10.90
+  salt_master_hostname: cfg01
+  salt_master_management_address: 192.168.10.90
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  stacklight_enabled: 'True'
+  stacklight_log_address: 172.16.10.70
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 172.16.10.107
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 172.16.10.108
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 172.16.10.109
+  stacklight_log_node03_hostname: log03
+  stacklight_monitor_address: 172.16.10.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 172.16.10.107
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 172.16.10.108
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 172.16.10.109
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_notification_address: alerts@localhost
+  stacklight_notification_smtp_host: 127.0.0.1
+  stacklight_telemetry_address: 172.16.10.70
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 172.16.10.107
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 172.16.10.108
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 172.16.10.109
+  stacklight_telemetry_node03_hostname: mtr03
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.1.0.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.1.0.0/24
+  tenant_vlan: '20'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'False'
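
Note: the new cookiecutter context is large enough that address typos are easy to miss in review. A quick sanity check one could run over it — a sketch assuming PyYAML is available; `check_context` is a hypothetical helper, not repo code:

    import ipaddress
    import yaml  # PyYAML, assumed available

    def check_context(path):
        """Flag *_address values that do not parse as IPv4 addresses and
        *_network_subnet values that do not parse as networks.
        ${_param:...} values are reclass references resolved later, and
        legitimate non-IP values (email_address and
        stacklight_notification_address are e-mails) will show up here
        for manual whitelisting."""
        with open(path) as f:
            ctx = yaml.safe_load(f)['default_context']
        problems = []
        for key, value in sorted(ctx.items()):
            value = str(value)
            if value.startswith('${_param:'):
                continue
            try:
                if key.endswith('_network_subnet'):
                    ipaddress.ip_network(value)
                elif key.endswith('_address'):
                    ipaddress.ip_address(value)
            except ValueError:
                problems.append((key, value))
        return problems

    print(check_context('_context-cookiecutter-mcp-ocata-ovs.yaml'))
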
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
new file mode 100644
index 0000000..18a87be
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
@@ -0,0 +1,141 @@
+nodes:
+    cfg01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    ctl01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - infra_kvm
+      - openstack_control_leader
+      - openstack_database_leader
+      - openstack_message_queue
+      - features_designate_database
+      - features_designate
+      - features_designate_keystone
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    ctl02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - infra_kvm
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - features_designate_database
+      - features_designate
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    ctl03.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - infra_kvm
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - features_designate_database
+      - features_designate
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    prx01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    mon01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_server_node01
+      roles:
+      - stacklightv2_server_leader
+      - stacklight_telemetry_leader
+      - stacklight_log_leader_v2
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    mon02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - stacklight_telemetry
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    mon03.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - stacklight_telemetry
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    # Generator-based computes. For compatibility only
+    cmp<<count>>.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_compute_rack01
+      roles:
+      - openstack_compute
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: bond0_ab_ovs_vxlan_ctl_mesh
+        ens5:
+          role: bond0_ab_ovs_vxlan_ctl_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
+
+    gtw01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: bond0_ab_ovs_vxlan_ctl_mesh
+        ens5:
+          role: bond0_ab_ovs_vxlan_ctl_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
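
Note: this environment context feeds the inventory generator. As a small illustration of its shape (PyYAML assumed; the literal `cmp<<count>>` key is a generator placeholder expanded elsewhere):

    import yaml  # PyYAML, assumed available

    with open('_context-environment.yaml') as f:
        nodes = yaml.safe_load(f)['nodes']

    # Print each node's reclass storage name, roles, and interface roles.
    for name, node in sorted(nodes.items()):
        ifaces = {n: i['role'] for n, i in node.get('interfaces', {}).items()}
        print(name, node['reclass_storage_name'], node['roles'], ifaces)
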
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
index 5d6f219..a82a5f6 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
@@ -179,7 +179,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
index 5f5064f..11fb74b 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
@@ -17,6 +17,8 @@
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 
 {{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
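
Note: ADJUST_SL_OPTS is invoked after the salt master underlay states but before inventory generation, so the StackLight overrides land in the cluster model before nodes are classified. The OVERRIDES_FILENAME argument is ordinary Jinja string concatenation; rendering it with a sample cluster name (a sketch assuming jinja2, the same engine that renders these templates) gives:

    from jinja2 import Template

    t = Template("{{ base + name + '/stacklight/server.yml' }}")
    print(t.render(base='/srv/salt/reclass/classes/cluster/',
                   name='virtual-mcp-ocata-ovs'))
    # -> /srv/salt/reclass/classes/cluster/virtual-mcp-ocata-ovs/stacklight/server.yml
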
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
index 688f70d..c11350b 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
@@ -20,102 +20,102 @@
   skip_fail: false
 
 - description: Configure docker service
-  cmd: salt -C 'I@docker:swarm' state.sls docker.host
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install docker swarm on master node
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Send grains to the swarm slave nodes
-  cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Update mine
-  cmd: salt -C 'I@docker:swarm' mine.update
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Refresh modules
-  cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Rerun swarm on slaves to ensure proper token population
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Configure slave nodes
-  cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  List registered Docker swarm nodes
-  cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 # Install slv2 infra
 - description: Install telegraf
-  cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
 - description: Configure Prometheus exporters
-  cmd: salt -C 'I@prometheus:exporters' state.sls prometheus
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure collector
-  cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch server
-  cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana server
-  cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch client
-  cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana client
-  cmd: salt -C 'I@kibana:client' state.sls kibana.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Check influxdb
   cmd: |
-    INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
     echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
     if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt -C 'I@influxdb:server' state.sls influxdb
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
@@ -124,44 +124,44 @@
 # Collect grains needed to configure the services
 
 - description: Get grains
-  cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Sync modules
-  cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Update mine
-  cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 # Configure the services running in Docker Swarm
 - description: Install prometheus alertmanager
-  cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: run docker state
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: docker ps
-  cmd: salt -C 'I@docker:swarm' dockerng.ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure Grafana dashboards and datasources
-  cmd: sleep 30;  salt -C 'I@grafana:client' state.sls grafana.client
+  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
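
Note: every step above targets minions with compound matchers (`-C 'I@<pillar:path>'`). The same selection can be reproduced from the master through salt's Python API, which is handy when debugging why a step matched no minions — a sketch assuming the salt package is installed; releases older than 2017.7 spell the keyword expr_form rather than tgt_type:

    import salt.client

    local = salt.client.LocalClient()
    # Which minions carry the docker:swarm pillar?
    matched = local.cmd('I@docker:swarm', 'test.ping', tgt_type='compound')
    print(sorted(matched))
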
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
index 6448211..da7908d 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -44,35 +42,7 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data1604.yaml
index c056295..3fbb777 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -44,34 +42,6 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
 
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
index 3c772ec..8b05b63 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
@@ -38,8 +38,8 @@
 
 - description: Install ceph mgr if defined (needed only for Luminous)
   cmd: |
-    if salt -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
-      salt -C 'I@ceph:mgr' state.sls ceph.mgr
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' state.sls ceph.mgr
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
@@ -96,9 +96,9 @@
 
 - description: Install radosgw if it exists
   cmd: |
-    salt -C 'I@ceph:radosgw' saltutil.sync_grains;
-    salt -C 'I@ceph:radosgw' state.sls ceph.radosgw;
-    salt -C 'I@keystone:client' state.sls keystone.client;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' saltutil.sync_grains;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' state.sls ceph.radosgw;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keystone:client' state.sls keystone.client;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
@@ -121,34 +121,34 @@
 
 - description: Connect ceph to glance
   cmd: |
-    salt -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
-    salt -C 'I@ceph:common and I@glance:server' service.restart glance-api;
-    salt -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
-    salt -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-api;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
 
 - description: Connect ceph to cinder and nova
   cmd: |
-    salt -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
-    salt -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
-    salt -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
-    salt -C 'I@ceph:common and I@nova:compute' state.sls nova;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls nova;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
 
 - description: Restart cinder volume
   cmd: |
-    salt -C 'I@cinder:controller' service.restart cinder-volume;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@cinder:controller' service.restart cinder-volume;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
 
 - description: Restart nova-compute
   cmd: |
-    salt -C 'I@nova:compute' service.restart nova-compute;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nova:compute' service.restart nova-compute;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
index 625bedd..f4903a9 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
@@ -164,7 +164,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
index 6448211..da7908d 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -44,35 +42,7 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data1604.yaml
index c056295..3fbb777 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -44,34 +42,6 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
 
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
new file mode 100644
index 0000000..3802235
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
@@ -0,0 +1,154 @@
+default_context:
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_enabled: 'False'
+  cluster_domain: virtual-mcp-pike-dvr.local
+  cluster_name: virtual-mcp-pike-dvr
+  compute_bond_mode: active-backup
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 172.16.10.0/24
+  control_vlan: '10'
+  cookiecutter_template_branch: master
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  deploy_network_gateway: 192.168.10.1
+  deploy_network_netmask: 255.255.255.0
+  deploy_network_subnet: 192.168.10.0/24
+  deployment_type: physical
+  dns_server01: 8.8.8.8
+  dns_server02: 8.8.4.4
+  email_address: ddmitriev@mirantis.com
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 172.16.10.101
+  infra_kvm01_deploy_address: 192.168.10.101
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 172.16.10.102
+  infra_kvm02_deploy_address: 192.168.10.102
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 172.16.10.103
+  infra_kvm03_deploy_address: 192.168.10.103
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 172.16.10.100
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 192.168.10.90
+  maas_hostname: cfg01
+  mcp_version: stable
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openstack_benchmark_node01_address: 172.16.10.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '100'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 172.16.10
+  openstack_compute_rack01_tenant_subnet: 10.1.0
+  openstack_control_address: 172.16.10.100
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 172.16.10.101
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 172.16.10.102
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 172.16.10.103
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 172.16.10.100
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: 172.16.10.101
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: 172.16.10.102
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: 172.16.10.103
+  openstack_database_node03_hostname: dbs03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_address: 172.16.10.110
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node01_tenant_address: 10.1.0.6
+  openstack_gateway_node02_address: 172.16.10.111
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node02_tenant_address: 10.1.0.7
+  openstack_gateway_node03_address: 172.16.10.112
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node03_tenant_address: 10.1.0.8
+  openstack_message_queue_address: 172.16.10.100
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: 172.16.10.101
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: 172.16.10.102
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: 172.16.10.103
+  openstack_message_queue_node03_hostname: msg03
+  openstack_network_engine: ovs
+  openstack_neutron_qos: 'False'
+  openstack_neutron_vlan_aware_vms: 'False'
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_ovs_dvr_enabled: 'True'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 172.16.10.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 172.16.10.121
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 172.16.10.122
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 172.16.10.19
+  openstack_version: pike
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  oss_notification_app_id: '24'
+  oss_notification_sender_password: password
+  oss_notification_smtp_port: '587'
+  oss_notification_webhook_login_id: '13'
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+  salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+  salt_master_address: 172.16.10.90
+  salt_master_hostname: cfg01
+  salt_master_management_address: 192.168.10.90
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  stacklight_enabled: 'True'
+  stacklight_log_address: 172.16.10.70
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 172.16.10.107
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 172.16.10.108
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 172.16.10.109
+  stacklight_log_node03_hostname: log03
+  stacklight_monitor_address: 172.16.10.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 172.16.10.107
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 172.16.10.108
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 172.16.10.109
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_notification_address: alerts@localhost
+  stacklight_notification_smtp_host: 127.0.0.1
+  stacklight_telemetry_address: 172.16.10.70
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 172.16.10.107
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 172.16.10.108
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 172.16.10.109
+  stacklight_telemetry_node03_hostname: mtr03
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.1.0.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.1.0.0/24
+  tenant_vlan: '20'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'False'
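
Note: this context is near-identical to the ocata-ovs one above; the substantive deltas are cluster_name/cluster_domain, openstack_version (ocata vs pike), openstack_ovs_dvr_enabled ('False' vs 'True'), plus per-cluster seeds and secrets. A small comparison helper makes such pairs reviewable at a glance (sketch assuming PyYAML; `context_diff` is hypothetical):

    import yaml  # PyYAML, assumed available

    def context_diff(path_a, path_b):
        """Print keys whose values differ between two cookiecutter contexts."""
        with open(path_a) as fa, open(path_b) as fb:
            a = yaml.safe_load(fa)['default_context']
            b = yaml.safe_load(fb)['default_context']
        for key in sorted(set(a) | set(b)):
            if a.get(key) != b.get(key):
                print('%s: %r -> %r' % (key, a.get(key), b.get(key)))
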
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/_context-environment.yaml
new file mode 100644
index 0000000..9a04b68
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/_context-environment.yaml
@@ -0,0 +1,165 @@
+nodes:
+    cfg01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    ctl01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - infra_kvm
+      - openstack_control_leader
+      - openstack_database_leader
+      - openstack_message_queue
+      - features_designate_pool_manager_database
+      - features_designate_pool_manager
+      - features_designate_pool_manager_keystone
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    ctl02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - infra_kvm
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - features_designate_pool_manager_database
+      - features_designate_pool_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    ctl03.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - infra_kvm
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - features_designate_pool_manager_database
+      - features_designate_pool_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    prx01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    mon01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_server_node01
+      roles:
+      - stacklightv2_server_leader
+      - stacklight_telemetry_leader
+      - stacklight_log_leader_v2
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    mon02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - stacklight_telemetry
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    mon03.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - stacklight_telemetry
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    # Generator-based computes. For compatibility only
+    cmp<<count>>.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_compute_rack01
+      roles:
+      - openstack_compute
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: bond0_ab_ovs_vxlan_ctl_mesh
+        ens5:
+          role: bond0_ab_ovs_vxlan_ctl_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
+
+    gtw01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: bond0_ab_ovs_vxlan_ctl_mesh
+        ens5:
+          role: bond0_ab_ovs_vxlan_ctl_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
+
+    dns01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_dns_node01
+      roles:
+      - features_designate_pool_manager_dns
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+          single_address: ${_param:openstack_dns_node01_address}
+
+    dns02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_dns_node02
+      roles:
+      - features_designate_pool_manager_dns
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+          single_address: ${_param:openstack_dns_node02_address}
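
Note: the dns01/dns02 interfaces pin single_address to reclass `${_param:...}` references, which are resolved against cluster-level parameters at classification time. A toy resolver shows the substitution; the address used below is hypothetical, since the real value comes from the cluster model:

    import re

    PARAM_RE = re.compile(r'\$\{_param:([^}]+)\}')

    def resolve(value, params):
        """Expand reclass-style ${_param:name} references."""
        return PARAM_RE.sub(lambda m: str(params[m.group(1)]), str(value))

    print(resolve('${_param:openstack_dns_node01_address}',
                  {'openstack_dns_node01_address': '172.16.10.113'}))  # hypothetical value
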
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
index 28ff9ac..45ededb 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
@@ -200,7 +200,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
index a7c06dd..51a2203 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
@@ -17,6 +17,8 @@
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 
 {{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/sl.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/sl.yaml
index 04b5ca7..988fe96 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/sl.yaml
@@ -18,105 +18,105 @@
   skip_fail: false
 
 - description: Configure docker service
-  cmd: salt -C 'I@docker:swarm' state.sls docker.host
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install docker swarm on master node
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Send grains to the swarm slave nodes
-  cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Update mine
-  cmd: salt -C 'I@docker:swarm' mine.update
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Refresh modules
-  cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Rerun swarm on slaves to ensure proper token population
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Configure slave nodes
-  cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  List registered Docker swarm nodes
-  cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 # Install slv2 infra
 - description: Install telegraf
-  cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
 - description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
   cmd: |
-    if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
-      salt -C 'I@prometheus:exporters' state.sls prometheus
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure collector
-  cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch server
-  cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana server
-  cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch client
-  cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana client
-  cmd: salt -C 'I@kibana:client' state.sls kibana.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Check influx db
   cmd: |
-    INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
     echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
     if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt -C 'I@influxdb:server' state.sls influxdb
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
@@ -125,44 +125,44 @@
 # Collect grains needed to configure the services
 
 - description: Get grains
-  cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Sync modules
-  cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Update mine
-  cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 5, delay: 15}
   skip_fail: false
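+# 'mine.update' republishes each minion's mine data (such as the grains
+# collected in the previous step) to the master; the higher retry count
+# here (5 tries, 15s apart) presumably allows for minions that are still
+# syncing modules.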
 
 # Configure the services running in Docker Swarm
 - description: Install prometheus alertmanager
-  cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: run docker state
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: docker ps
-  cmd: salt -C 'I@docker:swarm' dockerng.ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure Grafana dashboards and datasources
-  cmd: sleep 30;  salt -C 'I@grafana:client' state.sls grafana.client
+  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
index 6448211..da7908d 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -44,35 +42,7 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data1604.yaml
index c056295..3fbb777 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -44,34 +42,6 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
 
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
index 94bce47..b602748 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
@@ -39,8 +39,8 @@
 
 - description: Install ceph mgr if defined (needed only for Luminous)
   cmd: |
-    if salt -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
-      salt -C 'I@ceph:mgr' state.sls ceph.mgr
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' state.sls ceph.mgr
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
@@ -97,10 +97,10 @@
 
 - description: Install radosgw if exists
   cmd: |
-    if salt -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
-      salt -C 'I@ceph:radosgw' saltutil.sync_grains;
-      salt -C 'I@ceph:radosgw' state.sls ceph.radosgw;
-      salt -C 'I@keystone:client' state.sls keystone.client;
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' saltutil.sync_grains;
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' state.sls ceph.radosgw;
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keystone:client' state.sls keystone.client;
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
@@ -124,34 +124,34 @@
 
 - description: Connect ceph to glance
   cmd: |
-    salt -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
-    salt -C 'I@ceph:common and I@glance:server' service.restart glance-api;
-    salt -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
-    salt -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-api;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
 
 - description: Connect ceph to cinder and nova
   cmd: |
-    salt -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
-    salt -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
-    salt -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
-    salt -C 'I@ceph:common and I@nova:compute' state.sls nova;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls nova;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
 
 - description: Restart cinder volume
   cmd: |
-    salt -C 'I@cinder:controller' service.restart cinder-volume;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@cinder:controller' service.restart cinder-volume;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
 
 - description: Restart nova-compute
   cmd: |
-    salt -C 'I@nova:compute' service.restart nova-compute;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nova:compute' service.restart nova-compute;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
index d6fea5f..9d7dbf4 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
@@ -164,7 +164,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
index 6448211..da7908d 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -44,35 +42,7 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data1604.yaml
index c056295..3fbb777 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -44,34 +42,6 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
 
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
new file mode 100644
index 0000000..6cbb6a8
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
@@ -0,0 +1,154 @@
+default_context:
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_enabled: 'False'
+  cluster_domain: virtual-mcp-pike-ovs.local
+  cluster_name: virtual-mcp-pike-ovs
+  compute_bond_mode: active-backup
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 172.16.10.0/24
+  control_vlan: '10'
+  cookiecutter_template_branch: master
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  deploy_network_gateway: 192.168.10.1
+  deploy_network_netmask: 255.255.255.0
+  deploy_network_subnet: 192.168.10.0/24
+  deployment_type: physical
+  dns_server01: 8.8.8.8
+  dns_server02: 8.8.4.4
+  email_address: ddmitriev@mirantis.com
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 172.16.10.101
+  infra_kvm01_deploy_address: 192.168.10.101
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 172.16.10.102
+  infra_kvm02_deploy_address: 192.168.10.102
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 172.16.10.103
+  infra_kvm03_deploy_address: 192.168.10.103
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 172.16.10.100
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 192.168.10.90
+  maas_hostname: cfg01
+  mcp_version: stable
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openstack_benchmark_node01_address: 172.16.10.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '100'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 172.16.10
+  openstack_compute_rack01_tenant_subnet: 10.1.0
+  openstack_control_address: 172.16.10.100
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 172.16.10.101
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 172.16.10.102
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 172.16.10.103
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 172.16.10.100
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: 172.16.10.101
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: 172.16.10.102
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: 172.16.10.103
+  openstack_database_node03_hostname: dbs03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_address: 172.16.10.110
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node01_tenant_address: 10.1.0.6
+  openstack_gateway_node02_address: 172.16.10.111
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node02_tenant_address: 10.1.0.7
+  openstack_gateway_node03_address: 172.16.10.112
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node03_tenant_address: 10.1.0.8
+  openstack_message_queue_address: 172.16.10.100
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: 172.16.10.101
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: 172.16.10.102
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: 172.16.10.103
+  openstack_message_queue_node03_hostname: msg03
+  openstack_network_engine: ovs
+  openstack_neutron_qos: 'False'
+  openstack_neutron_vlan_aware_vms: 'False'
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_ovs_dvr_enabled: 'False'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 172.16.10.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 172.16.10.121
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 172.16.10.122
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 172.16.10.19
+  openstack_version: pike
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  oss_notification_app_id: '24'
+  oss_notification_sender_password: password
+  oss_notification_smtp_port: '587'
+  oss_notification_webhook_login_id: '13'
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+  salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+  salt_master_address: 172.16.10.90
+  salt_master_hostname: cfg01
+  salt_master_management_address: 192.168.10.90
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  stacklight_enabled: 'True'
+  stacklight_log_address: 172.16.10.70
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 172.16.10.107
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 172.16.10.108
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 172.16.10.109
+  stacklight_log_node03_hostname: log03
+  stacklight_monitor_address: 172.16.10.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 172.16.10.107
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 172.16.10.108
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 172.16.10.109
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_notification_address: alerts@localhost
+  stacklight_notification_smtp_host: 127.0.0.1
+  stacklight_telemetry_address: 172.16.10.70
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 172.16.10.107
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 172.16.10.108
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 172.16.10.109
+  stacklight_telemetry_node03_hostname: mtr03
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.1.0.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.1.0.0/24
+  tenant_vlan: '20'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'False'
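+# Values of the form ${_param:...} above (e.g. public_host) are reclass
+# parameter references; they are expected to be resolved when the model is
+# rendered, not substituted by cookiecutter itself.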
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml
new file mode 100644
index 0000000..18a87be
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml
@@ -0,0 +1,141 @@
+nodes:
+    cfg01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    ctl01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - infra_kvm
+      - openstack_control_leader
+      - openstack_database_leader
+      - openstack_message_queue
+      - features_designate_database
+      - features_designate
+      - features_designate_keystone
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    ctl02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - infra_kvm
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - features_designate_database
+      - features_designate
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    ctl03.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - infra_kvm
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - features_designate_database
+      - features_designate
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    prx01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    mon01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_server_node01
+      roles:
+      - stacklightv2_server_leader
+      - stacklight_telemetry_leader
+      - stacklight_log_leader_v2
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    mon02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - stacklight_telemetry
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    mon03.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - stacklight_telemetry
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_vlan_ctl
+
+    # Generator-based computes. For compatibility only
+    cmp<<count>>.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_compute_rack01
+      roles:
+      - openstack_compute
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: bond0_ab_ovs_vxlan_ctl_mesh
+        ens5:
+          role: bond0_ab_ovs_vxlan_ctl_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
+
+    gtw01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: bond0_ab_ovs_vxlan_ctl_mesh
+        ens5:
+          role: bond0_ab_ovs_vxlan_ctl_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
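+# Judging by the role names, ens4/ens5 are paired into bond0 carrying
+# control and VXLAN mesh traffic, while ens6 joins bond1 for floating
+# (external) traffic on the compute and gateway nodes.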
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
index b58e25e..65f3dab 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
@@ -179,7 +179,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
index 3eb5082..88b25cd 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
@@ -17,6 +17,8 @@
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 
 {{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/sl.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/sl.yaml
index 0c37346..a8ca944 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/sl.yaml
@@ -20,102 +20,102 @@
   skip_fail: false
 
 - description: Configure docker service
-  cmd: salt -C 'I@docker:swarm' state.sls docker.host
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install docker swarm on master node
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Send grains to the swarm slave nodes
-  cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Update mine
-  cmd: salt -C 'I@docker:swarm' mine.update
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Refresh modules
-  cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Rerun swarm on slaves for proper token population
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Configure slave nodes
-  cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  List registered Docker swarm nodes
-  cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 # Install slv2 infra
 - description: Install telegraf
-  cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
 - description: Configure Prometheus exporters
-  cmd: salt -C 'I@prometheus:exporters' state.sls prometheus
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure collector
-  cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch server
-  cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana server
-  cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch client
-  cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana client
-  cmd: salt -C 'I@kibana:client' state.sls kibana.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Check influx db
   cmd: |
-    INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
     echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
     if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt -C 'I@influxdb:server' state.sls influxdb
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
@@ -124,44 +124,44 @@
 # Collect grains needed to configure the services
 
 - description: Get grains
-  cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Sync modules
-  cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Update mine
-  cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 # Configure the services running in Docker Swarm
 - description: Install prometheus alertmanager
-  cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: run docker state
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: docker ps
-  cmd: salt -C 'I@docker:swarm' dockerng.ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure Grafana dashboards and datasources
-  cmd: sleep 30;  salt -C 'I@grafana:client' state.sls grafana.client
+  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
index 6448211..da7908d 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -44,35 +42,7 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data1604.yaml
index c056295..3fbb777 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -44,34 +42,6 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
 
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/Readme.txt b/tcp_tests/templates/virtual-mcp-sl-os/Readme.txt
new file mode 100644
index 0000000..b4fa30f
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-sl-os/Readme.txt
@@ -0,0 +1,7 @@
+Template for deploying mitaka/newton models:
+- virtual-mcp-mitaka-dvr
+- virtual-mcp-mitaka-ovs
+- virtual-mcp-newton-dvr
+- virtual-mcp-newton-ovs
+
+Used by the maintenance team.
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/common-services.yaml b/tcp_tests/templates/virtual-mcp-sl-os/common-services.yaml
new file mode 100644
index 0000000..3ad67c2
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-sl-os/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster and *01*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Check the VIP
+  cmd: |
+    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
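+# The check above reads the expected VIP from pillar via salt-call, then
+# greps 'ip a' on every keepalived cluster member; the step fails unless
+# at least one node actually holds the address.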
+
+
+- description: Install glusterfs
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.service
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Setup glusterfs on primary controller
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check the gluster status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check the rabbitmq status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on first server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:master' state.sls galera
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on other servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:slave' state.sls galera
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check mysql status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+- description: Install haproxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' state.sls haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check haproxy status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.status haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Restart rsyslog
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.restart rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install memcached on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@memcached:server' state.sls memcached
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml b/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml
new file mode 100644
index 0000000..0ddf871
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml
@@ -0,0 +1,294 @@
+{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+
+# Install OpenStack control services
+
+- description: Install glance on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@glance:server' state.sls glance -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
+- description: Restart apache due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Check apache status due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glance:server' state.sls glusterfs.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
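+# keystone.server is applied a second time on purpose: the first run creates
+# node-local fernet keys, and re-running it after glusterfs.client is mounted
+# lets all controllers converge on the shared key directory.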
+
+- description: Populate keystone services/tenants/admins
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:client' state.sls keystone.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check keystone service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check glance image-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install nova on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nova:controller' state.sls nova -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check nova service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+
+- description: Install cinder
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:controller' state.sls cinder -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check cinder list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install neutron service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:server' state.sls neutron -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install neutron on gtw node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:gateway' state.sls neutron
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# Install designate
+#- description: Install bind
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#    -C 'I@bind:server' state.sls bind
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 5}
+#  skip_fail: false
+#
+#- description: Install designate
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#    -C 'I@designate:server' state.sls designate -b 1
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 5, delay: 10}
+#  skip_fail: false
+
+- description: Check neutron agent-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@heat:server' state.sls heat -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 10}
+  skip_fail: false
+
+
+- description: Deploy horizon dashboard
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@horizon:server' state.sls horizon
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Deploy nginx proxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nginx:server' state.sls nginx
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Check IP on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+    'ip a'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 10, delay: 30}
+  skip_fail: false
+
+
+# Upload cirros image
+
+- description: Upload cirros image on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Register image in glance
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
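+
+# A commented sketch (not executed) of how the topology above could be
+# verified from ctl01; 'neutron router-port-list' is a standard neutron CLI
+# call, shown here only as an illustration:
+#- description: Verify net04 router topology
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+#    '. /root/keystonercv3; neutron router-port-list net04_router01'
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 30}
+#  skip_fail: true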
+
+#- description:  Allow all tcp
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+#    '. /root/keystonercv3; openstack security group rule create --proto tcp --dst-port 22 default'
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 30}
+#  skip_fail: false
+#
+#- description:  Allow all icmp
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+#    '. /root/keystonercv3; openstack security group rule create --proto icmp default'
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 30}
+#  skip_fail: false
+
+- description: Sync time
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq; service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
+
+- description: Install docker.io on gtw
+  cmd: salt-call cmd.run 'apt-get install docker.io -y'
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create rc file on cfg
+  cmd: scp ctl01:/root/keystonercv3 /root
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Copy rc file
+  cmd: scp /root/keystonercv3 gtw01:/root
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
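+
+# Note: the two scp steps above assume passwordless root SSH between cfg01 and
+# the other nodes; the cfg01 cloud-init config below disables
+# StrictHostKeyChecking for root, so the copy is not blocked by a host-key prompt.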
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
new file mode 100644
index 0000000..595695f
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
@@ -0,0 +1,28 @@
+{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-sl-os/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'virtual-mcp-sl-os/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+- description: Workaround - run linux state to fix hosts
+  cmd: salt "cfg*" state.sls linux
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml b/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml
new file mode 100644
index 0000000..76b76b6
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml
@@ -0,0 +1,176 @@
+{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
+
+# Install keepalived and docker swarm
+- description: Install keepalived on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'mon*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the VIP on mon nodes
+  cmd: |
+    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure docker service
+  cmd: salt -C 'I@docker:swarm' state.sls docker.host
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install docker swarm on master node
+  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+  cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description:  Update mine
+  cmd: salt -C 'I@docker:swarm' mine.update
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description:  Refresh modules
+  cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description:  Rerun swarm on slaves for proper token population
+  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description:  Configure slave nodes
+  cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description:  List registered Docker swarm nodes
+  cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+# Install slv2 infra
+- description: Install telegraf
+  cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Configure Prometheus exporters
+  cmd: salt -C 'I@prometheus:exporters' state.sls prometheus
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure collector
+  cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install elasticsearch server
+  cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install kibana server
+  cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install elasticsearch client
+  cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install kibana client
+  cmd: salt -C 'I@kibana:client' state.sls kibana.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check influx db
+  cmd: |
+    INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+    if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+        salt -C 'I@influxdb:server' state.sls influxdb
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
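+
+# The guard above applies a state only when minions actually match the pillar
+# target, so the step is a no-op on clusters without that service. The same
+# pattern could be reused for any optional component, e.g. (hypothetical,
+# not executed here):
+#   GRAFANA_SERVICE=`salt -C 'I@grafana:server' test.ping 1>/dev/null 2>&1 && echo true`;
+#   if [[ "$GRAFANA_SERVICE" == "true" ]]; then
+#       salt -C 'I@grafana:server' state.sls grafana.server
+#   fi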
+
+# Collect grains needed to configure the services
+
+- description: Get grains
+  cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Sync modules
+  cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Update mine
+  cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+# Configure the services running in Docker Swarm
+- description: Install prometheus alertmanager
+  cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: run docker state
+  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: docker ps
+  cmd: salt -C 'I@docker:swarm' dockerng.ps
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+  cmd: sleep 30;  salt -C 'I@grafana:client' state.sls grafana.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Run salt minion to create cert files
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-sl-os/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..6448211
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml
@@ -0,0 +1,91 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifdown ens3
+   - sudo ip r d default || true  # remove existing default route to get it from dhcp
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   - echo "Preparing base OS"
+
+   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+   # Configure Ubuntu mirrors
+   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   - apt-get clean
+   - apt-get update
+
+   # Install common packages
+   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
+
+   ########################################################
+   # Node is ready, allow SSH access
+   - echo "Allow SSH access ..."
+   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data1604.yaml
new file mode 100644
index 0000000..c056295
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data1604.yaml
@@ -0,0 +1,80 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   - echo "Preparing base OS"
+   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+   # Configure Ubuntu mirrors
+   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   - apt-get clean
+   - eatmydata apt-get update && apt-get -y upgrade
+
+   # Install common packages
+   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   ########################################################
+   # Node is ready, allow SSH access
+   - echo "Allow SSH access ..."
+   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml
new file mode 100644
index 0000000..955750b
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml
@@ -0,0 +1,510 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
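+# REPOSITORY_SUITE is substituted into the apt mirror URLs of the user-data
+# templates imported below, so the default 'testing' renders, for example:
+#   deb [arch=amd64] http://mirror.mirantis.com/testing/ubuntu/ xenial main restricted universe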
+
+{% import 'virtual-mcp-sl-os/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'virtual-mcp-sl-os/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'virtual-mcp-sl-os/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
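+
+# The aliases above are YAML anchors; the node definitions below dereference
+# them (e.g. *cloudinit_user_data_1604), so each cloud-init payload is
+# declared once and shared by every node that needs it.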
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-sl-os') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'virtual-mcp-sl-os_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +107
+            default_{{ HOSTNAME_MON02 }}: +108
+            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +90
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +107
+            default_{{ HOSTNAME_MON02 }}: +108
+            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +107
+            default_{{ HOSTNAME_MON02 }}: +108
+            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +107
+            default_{{ HOSTNAME_MON02 }}: +108
+            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
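+
+# Offsets in ip_reserved/ip_ranges are relative to the pool's network. Assuming
+# the default pools, a /24 is carved from e.g. 10.60.0.0/16, so '+100' resolves
+# to x.y.z.100 and a dhcp range of [+90, -10] spans from x.y.z.90 up to ten
+# addresses before the end of the subnet (negative offsets count from the end).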
+
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+          tenant: tenant-pool01
+          external: external-pool01
+
+        l2_network_devices:
+          private:
+            address_pool: private-pool01
+            dhcp: true
+
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          tenant:
+            address_pool: tenant-pool01
+            dhcp: true
+
+          external:
+            address_pool: external-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: &interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config: &network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMP01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: &all_interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: tenant
+                  interface_model: *interface_model
+                - label: ens6
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config: &all_network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - tenant
+                ens6:
+                  networks:
+                    - external
+
+          - name: {{ HOSTNAME_CMP02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-mcp10-contrail/openstack.yaml b/tcp_tests/templates/virtual-mcp10-contrail/openstack.yaml
index 59c350d..8732168 100644
--- a/tcp_tests/templates/virtual-mcp10-contrail/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp10-contrail/openstack.yaml
@@ -208,7 +208,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp10-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp10-dvr/openstack.yaml
index 5b292f8..05d07be 100644
--- a/tcp_tests/templates/virtual-mcp10-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp10-dvr/openstack.yaml
@@ -148,7 +148,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml
index 87657f9..b8982b9 100644
--- a/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml
@@ -154,7 +154,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml
index 5712ad7..16ada26 100644
--- a/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml
@@ -165,7 +165,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
index b98a23a..a650b87 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
@@ -22,6 +22,8 @@
 
 {{ SHARED.ADJUST_K8S_OPTS() }}
 
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
 {%- if ENABLE_COMPUTES_SELF_REGISTER != '' %}
 {{ SHARED.REGISTER_COMPUTE_NODES() }}
 {%- endif %}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml
index ee723cb..831cded 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml
@@ -3,105 +3,105 @@
 
 # Install docker swarm
 - description: Configure docker service
-  cmd: salt -C 'I@docker:swarm' state.sls docker.host
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install docker swarm on master node
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Send grains to the swarm slave nodes
-  cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Update mine
-  cmd: salt -C 'I@docker:swarm' mine.update
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Refresh modules
-  cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Rerun swarm on slaves to proper token population
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Configure slave nodes
-  cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  List registered Docker swarm nodes
-  cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 # Install slv2 infra
 - description: Install telegraf
-  cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
 - description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
   cmd: |
-    if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
-      salt -C 'I@prometheus:exporters' state.sls prometheus
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure fluentd
-  cmd: salt -C 'I@fluentd:agent' state.sls fluentd
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch server
-  cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana server
-  cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch client
-  cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana client
-  cmd: salt -C 'I@kibana:client' state.sls kibana.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Check influix db
   cmd: |
-    INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
     echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
     if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt -C 'I@influxdb:server' state.sls influxdb
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
@@ -110,56 +110,44 @@
 # Collect grains needed to configure the services
 
 - description: Get grains
-  cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Sync modules
-  cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Update mine
-  cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-# Change environment configuration before deploy
-- description: Set SL docker images deploy parameters
-  cmd: |
-  {% for sl_opt, value in config.sl_deploy.items() %}
-    {% if value|string() %}
-    salt-call reclass.cluster_meta_set {{ sl_opt }} {{ value }};
-    {% endif %}
-  {% endfor %}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
 # Configure the services running in Docker Swarm
 - description: Install prometheus alertmanager
-  cmd: salt -C 'I@docker:swarm' state.sls prometheus -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:server' state.sls prometheus -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: run docker state
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: docker ps
-  cmd: salt -C 'I@docker:swarm' dockerng.ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure Grafana dashboards and datasources
-  cmd: sleep 30;  salt -C 'I@grafana:client' state.sls grafana.client
+  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data-cfg01.yaml
index 25c662b..504fd80 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -46,34 +44,7 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - eatmydata apt-get clean && apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data1604.yaml
index c0c8e1f..6fd3272 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -45,34 +43,6 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
 
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
index 0062db5..3c37187 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
@@ -23,6 +23,8 @@
 
 {{ SHARED.ADJUST_K8S_OPTS() }}
 
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
 {%- if ENABLE_COMPUTES_SELF_REGISTER != '' %}
 {{ SHARED.REGISTER_COMPUTE_NODES() }}
 {%- endif %}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml
index 14264f0..43d6ad8 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml
@@ -10,7 +10,7 @@
   skip_fail: false
 
 - description: Restart keepalived service
-  cmd: salt -C 'mon*' cmd.run "systemctl restart keepalived"
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "systemctl restart keepalived"
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
@@ -25,105 +25,105 @@
   skip_fail: false
 
 - description: Configure docker service
-  cmd: salt -C 'I@docker:swarm' state.sls docker.host
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install docker swarm on master node
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Send grains to the swarm slave nodes
-  cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Update mine
-  cmd: salt -C 'I@docker:swarm' mine.update
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Refresh modules
-  cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Rerun swarm on slaves to proper token population
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  Configure slave nodes
-  cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description:  List registered Docker swarm nodes
-  cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 # Install slv2 infra
 - description: Install telegraf
-  cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
 - description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
   cmd: |
-    if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
-      salt -C 'I@prometheus:exporters' state.sls prometheus
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure collector
-  cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch server
-  cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana server
-  cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch client
-  cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana client
-  cmd: salt -C 'I@kibana:client' state.sls kibana.client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Check influix db
   cmd: |
-    INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
     echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
     if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt -C 'I@influxdb:server' state.sls influxdb
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
@@ -132,44 +132,44 @@
 # Collect grains needed to configure the services
 
 - description: Get grains
-  cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Sync modules
-  cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Update mine
-  cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 10}
   skip_fail: false
 
 # Configure the services running in Docker Swarm
 - description: Install prometheus alertmanager
-  cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: run docker state
-  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: docker ps
-  cmd: salt -C 'I@docker:swarm' dockerng.ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Configure Grafana dashboards and datasources
-  cmd: sleep 60;  salt -C 'I@grafana:client' state.sls grafana.client
+  cmd: sleep 60;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data-cfg01.yaml
index 84799cf..4f140a0 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -48,34 +46,7 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - eatmydata apt-get clean && eatmydata apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml
index 9593ddf..2a41ee3 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -44,37 +42,6 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## tcp cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ trusty main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ trusty-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ trusty-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/trusty {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/2016.3 trusty main" > /etc/apt/sources.list.d/saltstack.list
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -
-
-   - eatmydata apt-get clean
-   - eatmydata apt-get update && eatmydata apt-get -y upgrade
-
-   # install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Workaround for bug https://mirantis.jira.com/browse/PROD-8214
-   - eatmydata apt-get -y install --install-recommends linux-generic-lts-xenial
-   - reboot
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #- echo "Allow SSH access ..."
-   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1604.yaml
index 974682c..5fc02ce 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -46,33 +44,6 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml
index 0c93c11..afe3b21 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml
@@ -164,7 +164,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml
index 6ab7940..f9dca1d 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml
@@ -165,7 +165,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
index d10e308..44024a9 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
@@ -164,7 +164,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml
index 64371d7..2ed45be 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml
@@ -164,7 +164,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Check IP on computes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
index 990136b..6cdf0de 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
@@ -19,6 +19,12 @@
 
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 
+- description: Enable hugepages on cmp nodes
+  cmd: salt 'cmp*' cmd.run "apt-get install -y hugepages; echo 2048 > /proc/sys/vm/nr_hugepages";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
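+# NOTE: assuming the default 2 MiB hugepage size, nr_hugepages=2048 reserves
+# about 4 GiB on each cmp node for the OVS-DPDK datapath; this step runs
+# before MACRO_BOOTSTRAP_ALL_MINIONS so the memory is set aside early.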
+
 {{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
 
 - description: Hack gtw node
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
index 6448211..da7908d 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -44,35 +42,7 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data1604.yaml
index b416c16..3fbb777 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -44,37 +42,6 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
 
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc hugepages
-
-   # Enable on nodes hugepages
-   - echo 2048 > /proc/sys/vm/nr_hugepages
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/tests/system/test_failover_ceph.py b/tcp_tests/tests/system/test_failover_ceph.py
new file mode 100644
index 0000000..4a68705
--- /dev/null
+++ b/tcp_tests/tests/system/test_failover_ceph.py
@@ -0,0 +1,244 @@
+#    Copyright 2017 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+class TestFailoverCeph(object):
+    """Test class for testing MCP ceph failover"""
+
+    def get_ceph_health(self, underlay, node_names):
+        """Get ceph health on the specified nodes
+
+        Returns the dict {<node_name>: <str>, }
+        where <str> is the 'ceph -s' output
+        """
+        res = {
+            node_name: underlay.check_call("ceph -s",
+                                           node_name=node_name,
+                                           raise_on_err=False)['stdout_str']
+            for node_name in node_names
+        }
+        return res
+
+    def show_failed_msg(self, failed):
+        return "There are failed tempest tests:\n\n  {0}".format(
+            '\n\n  '.join([(name + ': ' + detail)
+                           for name, detail in failed.items()]))
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    def test_restart_osd_node(self, func_name, underlay, config,
+                              openstack_deployed, ceph_deployed,
+                              openstack_actions,
+                              rally, show_step):
+        """Test restart ceph osd node
+
+        Scenario:
+            1. Find ceph osd nodes
+            2. Check ceph health before restart
+            3. Restart 1 ceph osd node
+            4. Check ceph health after restart
+            5. Run tempest smoke after failover
+            6. Check tempest report for failed tests
+
+        Requirements:
+            - Salt cluster
+            - OpenStack cluster
+            - Ceph cluster
+        """
+        openstack_actions._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+        # STEP #1
+        show_step(1)
+        osd_node_names = underlay.get_target_node_names(
+            target='osd')
+
+        # STEP #2
+        show_step(2)
+        # Get the ceph health output before restart
+        health_before = self.get_ceph_health(underlay, osd_node_names)
+        assert all(["OK" in p for n, p in health_before.items()]), (
+            "'Ceph health is not ok from node: {0}".format(health_before))
+
+        # STEP #3
+        show_step(3)
+        openstack_actions.warm_restart_nodes('osd01')
+
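+        # Force a one-shot time resync (ntpd -gq) after the restart: ceph
+        # reports clock skew as HEALTH_WARN, which would fail the check below.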
+        openstack_actions._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+
+        # STEP #4
+        show_step(4)
+        # Get the ceph health output after restart
+        health_after = self.get_ceph_health(underlay, osd_node_names)
+        assert all(["OK" in p for n, p in health_after.items()]), (
+            "Ceph health is not OK on some nodes: {0}".format(health_after))
+
+        rally.run_container()
+
+        # STEP #5
+        show_step(5)
+        results = rally.run_tempest(pattern='set=smoke',
+                                    conf_name='/var/lib/ceph_mcp.conf',
+                                    report_prefix=func_name,
+                                    designate_plugin=False,
+                                    timeout=1800)
+        # STEP #6
+        show_step(6)
+        assert not results['fail'], self.show_failed_msg(results['fail'])
+
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    def test_restart_cmn_node(self, func_name, underlay, config,
+                              openstack_deployed, ceph_deployed,
+                              common_services_actions,
+                              salt_actions, openstack_actions,
+                              rally, show_step):
+        """Test restart ceph cmn node
+
+        Scenario:
+            1. Find ceph cmn nodes
+            2. Check ceph health before restart
+            3. Restart 1 ceph cmn node
+            4. Check ceph health after restart
+            5. Run tempest smoke after failover
+            6. Check tempest report for failed tests
+
+        Requirements:
+            - Salt cluster
+            - OpenStack cluster
+            - Ceph cluster
+        """
+        openstack_actions._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+        # STEP #1
+        show_step(1)
+        cmn_node_names = underlay.get_target_node_names(
+            target='cmn')
+
+        # STEP #2
+        show_step(2)
+        # Get the ceph health output before restart
+        health_before = self.get_ceph_health(underlay, cmn_node_names)
+        assert all(["OK" in p for n, p in health_before.items()]), (
+            "'Ceph health is not ok from node: {0}".format(health_before))
+
+        # STEP #3
+        show_step(3)
+        openstack_actions.warm_restart_nodes('cmn01')
+
+        openstack_actions._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+
+        # STEP #4
+        show_step(4)
+        # Get the ceph health output after restart
+        health_after = self.get_ceph_health(underlay, cmn_node_names)
+        assert all(["OK" in p for n, p in health_after.items()]), (
+            "Ceph health is not OK on some nodes: {0}".format(health_after))
+
+        rally.run_container()
+
+        # STEP #5
+        show_step(5)
+        results = rally.run_tempest(pattern='set=smoke',
+                                    conf_name='/var/lib/ceph_mcp.conf',
+                                    report_prefix=func_name,
+                                    designate_plugin=False,
+                                    timeout=1800)
+        # STEP #6
+        show_step(6)
+        assert not results['fail'], self.show_failed_msg(results['fail'])
+
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    def test_restart_rgw_node(self, func_name, underlay, config,
+                              openstack_deployed, ceph_deployed,
+                              common_services_actions,
+                              salt_actions, openstack_actions,
+                              rally, show_step):
+        """Test restart ceph rgw node
+
+        Scenario:
+            1. Find ceph rgw nodes
+            2. Check ceph health before restart
+            3. Restart 1 ceph rgw node
+            4. Check ceph health after restart
+            5. Run tempest smoke after failover
+            6. Check tempest report for failed tests
+
+        Requirements:
+            - Salt cluster
+            - OpenStack cluster
+            - Ceph cluster
+        """
+        openstack_actions._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+
+        # STEP #1
+        show_step(1)
+        rgw_node_names = underlay.get_target_node_names(
+            target='rgw')
+        if not rgw_node_names:
+            pytest.skip('Skip as there are no rgw nodes in the deployment')
+
+        # STEP #2
+        show_step(2)
+        # Get the ceph health output before restart
+        health_before = self.get_ceph_health(underlay, rgw_node_names)
+        assert all(["OK" in p for n, p in health_before.items()]), (
+            "'Ceph health is not ok from node: {0}".format(health_before))
+
+        # STEP #3
+        show_step(3)
+        openstack_actions.warm_restart_nodes('rgw01')
+
+        openstack_actions._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+
+        # STEP #4
+        show_step(4)
+        # Get the ceph health output after restart
+        health_after = self.get_ceph_health(underlay, rgw_node_names)
+        assert all(["OK" in p for n, p in health_after.items()]), (
+            "Ceph health is not OK on some nodes: {0}".format(health_after))
+
+        rally.run_container()
+
+        # STEP #5
+        show_step(5)
+        results = rally.run_tempest(pattern='set=smoke',
+                                    conf_name='/var/lib/ceph_mcp.conf',
+                                    designate_plugin=False,
+                                    report_prefix=func_name,
+                                    timeout=1800)
+        # STEP #6
+        show_step(6)
+        assert not results['fail'], self.show_failed_msg(results['fail'])
+
+        LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_install_mcp11_ovs_newton.py b/tcp_tests/tests/system/test_install_mcp11_ovs_newton.py
index 9235edb..c6a1f8c 100644
--- a/tcp_tests/tests/system/test_install_mcp11_ovs_newton.py
+++ b/tcp_tests/tests/system/test_install_mcp11_ovs_newton.py
@@ -21,7 +21,7 @@
 
 
 @pytest.mark.deploy
-class TestMcp11Install(object):
+class TestMcp11NewtonInstall(object):
     """Test class for testing mcp11 vxlan deploy"""
 
     @pytest.mark.grab_versions
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index 299c7af..b1adedc 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -137,9 +137,10 @@
             '/root/stacklight-pytest/stacklight_tests/report.xml')
         LOG.info("*************** DONE **************")
 
+    @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
     def test_mcp11_pike_dpdk_install(self, underlay, openstack_deployed,
-                                     show_step):
+                                     show_step, openstack_actions):
         """Test for deploying an mcp dpdk environment and check it
         Scenario:
         1. Prepare salt on hosts
@@ -147,3 +148,11 @@
         3. Setup compute nodes
         """
-        LOG.info("*************** DONE **************")
+        openstack_actions._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+
+        if settings.RUN_TEMPEST:
+            openstack_actions.run_tempest(pattern=settings.PATTERN)
+            openstack_actions.download_tempest_report()
+        LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_install_mcp_sl_os.py b/tcp_tests/tests/system/test_install_mcp_sl_os.py
new file mode 100644
index 0000000..04d769f
--- /dev/null
+++ b/tcp_tests/tests/system/test_install_mcp_sl_os.py
@@ -0,0 +1,79 @@
+#    Copyright 2018 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+from tcp_tests import settings
+
+LOG = logger.logger
+
+
+@pytest.mark.deploy
+class TestMcpInstallStacklightOpenstack(object):
+    """Test class for testing mcp11 vxlan deploy"""
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    def test_mcp_os_install(self, underlay, openstack_deployed,
+                            openstack_actions):
+        """Test for deploying an mcp environment and check it
+        Scenario:
+        1. Prepare salt on hosts
+        2. Setup controller nodes
+        3. Setup compute nodes
+        4. Run tempest
+
+        """
+        openstack_actions._salt.local(
+                tgt='*', fun='cmd.run',
+                args='service ntp stop; ntpd -gq; service ntp start')
+
+        if settings.RUN_TEMPEST:
+            openstack_actions.run_tempest(pattern=settings.PATTERN)
+            openstack_actions.download_tempest_report()
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    def test_mcp_sl_os_install(self, underlay, config, openstack_deployed,
+                               sl_deployed):
+        """Test for deploying an mcp environment and check it
+        Scenario:
+        1. Prepare salt on hosts
+        2. Setup controller nodes
+        3. Setup compute nodes
+        4. Get monitoring nodes
+        5. Check that docker services are running
+        6. Check current prometheus targets are UP
+        7. Run SL component tests
+        8. Download SL component tests report
+        """
+        mon_nodes = sl_deployed.get_monitoring_nodes()
+        LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+
+        sl_deployed.check_prometheus_targets(mon_nodes)
+
+        # Run SL component tests
+        sl_deployed.run_sl_functional_tests(
+            'cfg01',
+            '/root/stacklight-pytest/stacklight_tests/',
+            'tests/prometheus',
+            'test_alerts.py')
+
+        # Download report
+        sl_deployed.download_sl_test_report(
+            'cfg01',
+            '/root/stacklight-pytest/stacklight_tests/report.xml')
+        LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_offline.py b/tcp_tests/tests/system/test_offline.py
new file mode 100644
index 0000000..e94188a
--- /dev/null
+++ b/tcp_tests/tests/system/test_offline.py
@@ -0,0 +1,154 @@
+#    Copyright 2017 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+# import pytest
+
+from tcp_tests import logger
+from tcp_tests.managers.jenkins.client import JenkinsClient
+
+LOG = logger.logger
+
+
+class TestOfflineDeployment(object):
+    """docstring for TestOfflineDeployment"""
+
+    def test_deploy_day1(self, show_step, underlay, common_services_deployed,
+                         salt_deployed):
+        """Test for deploying an mcp from day01 images
+
+        Scenario:
+            1. Approve the local ssh key for jenkins
+            2. Boot CFG and APT virtual machines
+            3. Setup jq
+            4. Wait for the salt master
+            5. Additional configuration of MaaS
+            6. Wait for the dhcpd server
+            7. Start commissioning nodes via MaaS
+            8. Wait for MaaS to finish commissioning the nodes
+            9. Start deploying nodes via MaaS
+            10. Wait for MaaS to finish deploying the nodes
+            11. Accept all keys
+            12. Run deploy OS job
+
+        """
+        # group = hardware._get_default_node_group()
+        nodes = underlay.node_names()
+        LOG.info("Nodes - {}".format(nodes))
+        cfg_node = 'cfg01.offline-ocata-vxlan.local'
+        verbose = True
+
+        # show_step(1)
+        # cmd = ("mkdir -p /var/lib/jenkins/.ssh && "
+        #        "ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts && "
+        #        "chown jenkins /var/lib/jenkins/.ssh/known_hosts")
+        # underlay.check_call(
+        #     node_name=cfg_node, verbose=verbose,
+        #     cmd=cmd)
+
+        # show_step(2)
+        # underlay.check_call(node_name=cfg_node, verbose=verbose,
+        #                     cmd='salt-key')
+
+        # show_step(3)
+        # underlay.check_call(node_name=cfg_node, verbose=verbose,
+        #                     cmd='apt install -y jq')
+
+        show_step(4)
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd="""timeout 300s /bin/bash -c 'while ! salt-call test.ping; do echo "salt master still isnt running"; sleep 10; done'""")  # noqa
+
+        show_step(5)
+        underlay.check_call(node_name=cfg_node, verbose=verbose,
+                            cmd='salt-call saltutil.sync_all')
+        underlay.check_call(node_name=cfg_node, verbose=verbose,
+                            cmd='salt-call state.sls maas.region')
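+        # The maas.region state configures the MaaS region controller on
+        # cfg01 from pillar data before its API is used below.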
+        underlay.check_call(
+            node_name=cfg_node, verbose=verbose,
+            cmd='maas logout mirantis && '
+            'maas login mirantis '
+            'http://localhost/MAAS/api/2.0/ '
+            'FTvqwe7ybBp68gPar2:5mcctTAXVL8mns4ef4:zrA9LZwu2tMc8BAZpsPUfwWwTyQnAtDN') # noqa
+
+        underlay.check_call(
+            node_name=cfg_node, verbose=verbose,
+            cmd="maas mirantis ipranges create "
+            "type=dynamic start_ip=10.10.191.255 end_ip=10.10.255.254 "
+            "subnet=$(maas mirantis subnets read | jq '.[] | select(.name==\"10.10.0.0/16\") | .id')") # noqa
+        underlay.check_call(node_name=cfg_node, verbose=verbose,
+            cmd="maas mirantis vlan update "
+            "$(maas mirantis subnets read | jq '.[] | select(.name==\"10.10.0.0/16\") | .vlan.fabric_id') " # noqa
+            "0 dhcp_on=True primary_rack='cfg01'")
+
+        underlay.check_call(
+            node_name=cfg_node, verbose=verbose,
+            cmd="ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub")
+        underlay.check_call(
+            node_name=cfg_node, verbose=verbose,
+            cmd='maas mirantis sshkeys create '
+                'key="$(cat ~root/.ssh/id_rsa.pub)"')
+
+        show_step(6)
+        underlay.check_call(node_name=cfg_node, verbose=verbose,
+            cmd="""timeout 90s /bin/bash -c 'while ! pidof dhcpd; do  echo "dhcpd still isnt running"; sleep 10; done'""") # noqa
+
+        show_step(7)
+        underlay.check_call(node_name=cfg_node, verbose=verbose,
+                            cmd='salt-call state.sls maas.machines')
+        show_step(8)
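+        # Poll MaaS until all 10 expected machines reach the 'Ready' status,
+        # i.e. commissioning has completed on every node.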
+        cmd = """   timeout 600s bash -c 'hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); while ! [ $(echo "$hosts" | wc -w) -eq 10 ]; do echo "Ready hosts:\n$hosts"; sleep 30; hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); done '   """ # noqa
+        underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+        underlay.check_call(node_name=cfg_node, verbose=verbose,
+                            cmd='salt-key')
+        show_step(9)
+        underlay.check_call(
+            node_name=cfg_node, verbose=verbose,
+            cmd='salt-call state.sls maas.machines.deploy')
+        show_step(10)
+        underlay.check_call(
+            node_name=cfg_node, verbose=verbose,
+            cmd='salt-call state.sls maas.machines.wait_for_deployed')
+        underlay.check_call(node_name=cfg_node, verbose=verbose,
+                            cmd='salt-key')
+
+        show_step(11)
+        underlay.check_call(
+            node_name=cfg_node, verbose=verbose, expected=[0, 1],
+            cmd='salt-key -A -y --include-denied --include-rejected')
+        underlay.check_call(
+            node_name=cfg_node, verbose=verbose,
+            cmd='salt-key')
+
+        salt_api = \
+            salt_deployed.get_pillar(cfg_node, '_param:jenkins_salt_api_url')
+        salt_api = salt_api[0].get(cfg_node)
+
+        show_step(12)
+        jenkins = JenkinsClient(
+            host='http://172.16.44.33:8081',
+            username='admin',
+            password='r00tme')
+        params = jenkins.make_defults_params('deploy_openstack')
+        params['SALT_MASTER_URL'] = salt_api
+        build = jenkins.run_build('deploy_openstack', params)
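+        # run_build() returns a (job_name, build_id) tuple; wait for that
+        # build to finish, allowing up to 2 hours for the full deploy.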
+
+        jenkins.wait_end_of_build(
+            name=build[0],
+            build_id=build[1],
+            timeout=60 * 60 * 2)
+
+        assert \
+            jenkins.build_info(
+                name=build[0], build_id=build[1])['result'] == 'SUCCESS', \
+            "Deploy openstack was failed"