Merge "Add template for env with mitaka on Ubuntu 14.04 (trusty)"
diff --git a/tcp_tests/helpers/utils.py b/tcp_tests/helpers/utils.py
index 15f9c8f..46bf9c8 100644
--- a/tcp_tests/helpers/utils.py
+++ b/tcp_tests/helpers/utils.py
@@ -18,6 +18,7 @@
 import StringIO
 import time
 import traceback
+import signal
 
 import jinja2
 import paramiko
@@ -444,3 +445,23 @@
               .format(top_fixtures_marks))
 
     return top_fixtures_marks
+
+
+class RunLimit(object):
+    def __init__(self, seconds=60, error_message='Timeout'):
+        self.seconds = seconds
+        self.error_message = error_message
+
+    def handle_timeout(self, signum, frame):
+        raise TimeoutException(self.error_message)
+
+    def __enter__(self):
+        signal.signal(signal.SIGALRM, self.handle_timeout)
+        signal.alarm(self.seconds)
+
+    def __exit__(self, exc_type, value, traceback):
+        signal.alarm(0)
+
+
+class TimeoutException(Exception):
+    pass
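
A minimal usage sketch for the new RunLimit context manager (the call site is
hypothetical; note that signal.SIGALRM is POSIX-only, fires only in the main
thread, and the previous handler is not restored on __exit__):

    from tcp_tests.helpers.utils import RunLimit, TimeoutException

    try:
        with RunLimit(seconds=10, error_message='Step took too long'):
            run_deployment_step()  # hypothetical long-running call
    except TimeoutException as e:
        print(e)  # 'Step took too long'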
diff --git a/tcp_tests/managers/common_services_manager.py b/tcp_tests/managers/common_services_manager.py
index c62114d..4e1e34a 100644
--- a/tcp_tests/managers/common_services_manager.py
+++ b/tcp_tests/managers/common_services_manager.py
@@ -177,6 +177,7 @@
                             'keepalived:cluster:instance',
                             message)
 
+                    # keepalived 'priority' can be the same on multiple nodes
                     if any([priority == prio
                             for node, prio in vips[address]['nodes'].items()]):
                         message = (
@@ -188,10 +189,7 @@
                                                address,
                                                vips[address]['nodes'].keys())
                         )
-                        raise exceptions.SaltPillarError(
-                            minion_id,
-                            'keepalived:cluster:instance',
-                            message)
+                        LOG.warning("On {0}, {1}".format(minion_id, message))
 
                     # Add data to the vips
                     vips[address]['nodes'][minion_id] = priority
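
Duplicate keepalived priorities are now reported instead of aborting the
deployment. A sketch of the condition with made-up pillar data:

    vips = {'10.167.4.10': {'nodes': {'ctl01.mcp.local': 101}}}
    priority = 101  # value parsed for the next minion, e.g. ctl02.mcp.local
    duplicate = any(priority == prio
                    for node, prio in vips['10.167.4.10']['nodes'].items())
    # duplicate is True: this used to raise SaltPillarError and now only
    # emits LOG.warning(), since equal priorities can be a valid setup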
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
index 80818cf..1e7f66c 100644
--- a/tcp_tests/managers/envmanager_devops.py
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -16,16 +16,21 @@
 
 from devops import error
 from devops.helpers import helpers
+from devops.helpers.helpers import ssh_client
 from devops import models
 from django import db
 from oslo_config import cfg
 
+from paramiko.ssh_exception import (
+    AuthenticationException,
+    BadAuthenticationType)
+
+from tcp_tests.helpers import env_config
+from tcp_tests.helpers import exceptions
+from tcp_tests.helpers import ext
+from tcp_tests import logger
 from tcp_tests import settings
 from tcp_tests import settings_oslo
-from tcp_tests.helpers import env_config
-from tcp_tests.helpers import ext
-from tcp_tests.helpers import exceptions
-from tcp_tests import logger
 
 LOG = logger.logger
 
@@ -305,8 +310,29 @@
         for node in self.__env.get_nodes(role__in=underlay_node_roles):
             LOG.info("Waiting for SSH on node '{0}' / {1} ...".format(
                 node.name, self.node_ip(node)))
+
+            def _ssh_wait(host,
+                          port,
+                          username=settings.SSH_NODE_CREDENTIALS['login'],
+                          password=settings.SSH_NODE_CREDENTIALS['password'],
+                          timeout=0):
+                try:
+                    ssh = ssh_client.SSHClient(
+                        host=host, port=port,
+                        auth=ssh_client.SSHAuth(
+                            username=username,
+                            password=password))
+                except AuthenticationException:
+                    return True
+                except BadAuthenticationType:
+                    return True
+                except Exception:
+                    return False
+
+                return ssh.execute('echo ok')['exit_code'] == 0
+
             helpers.wait(
-                lambda: helpers.tcp_ping(self.node_ip(node), 22),
+                lambda: _ssh_wait(self.node_ip(node), 22),
                 timeout=timeout,
                 timeout_msg="Node '{}' didn't open SSH in {} sec".format(
                     node.name, timeout
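
The readiness check is now stronger than the old tcp_ping: an authentication
error still counts as success because it proves sshd answered, while a
transport-level failure means the service is not up yet. A standalone sketch
of the same idea using plain paramiko (not the project's helpers):

    import paramiko

    def ssh_ready(host, port=22, username='root', password='r00tme'):
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        try:
            client.connect(host, port=port, username=username,
                           password=password, timeout=10)
        except paramiko.AuthenticationException:
            return True   # sshd answered; bad credentials still mean "up"
        except Exception:
            return False  # refused/reset/unreachable: keep waiting
        client.close()
        return True       # full login worked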
diff --git a/tcp_tests/managers/jenkins/client.py b/tcp_tests/managers/jenkins/client.py
index f781305..474713c 100644
--- a/tcp_tests/managers/jenkins/client.py
+++ b/tcp_tests/managers/jenkins/client.py
@@ -63,3 +63,6 @@
             timeout=timeout,
             timeout_msg='Timeout waiting, job {0} are not finished "{1}" build'
                         ' still'.format(name, build_id))
+
+    def get_build_output(self, name, build_id):
+        return self.__client.get_build_console_output(name, build_id)
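
A sketch of the intended call pattern for the new helper (job name and build
id are placeholders; get_build_console_output comes from python-jenkins):

    # Once the wait above reports the build as finished, pull its console
    # log for debugging or archiving:
    console_log = jenkins.get_build_output('deploy-openstack', 42)
    with open('deploy-openstack-42.log', 'w') as f:
        f.write(console_log)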
diff --git a/tcp_tests/managers/openstack_manager.py b/tcp_tests/managers/openstack_manager.py
index 8f6b140..feb270b 100644
--- a/tcp_tests/managers/openstack_manager.py
+++ b/tcp_tests/managers/openstack_manager.py
@@ -95,19 +95,23 @@
             self,
             target='gtw01', pattern=None,
             conf_name='lvm_mcp.conf',
-            registry=None):
+            registry=None, node_name=None):
         if not registry:
             registry = ('{0}/{1}'.format(settings.DOCKER_REGISTRY,
                                          settings.DOCKER_NAME))
-        target_name = [node_name for node_name
-                       in self.__underlay.node_names() if target in node_name]
+        if node_name is None and target is not None:
+            target_name = next(
+                name for name in self.__underlay.node_names()
+                if target in name)
+        else:
+            target_name = node_name
 
         cmd = ("apt-get -y install docker.io")
-        with self.__underlay.remote(node_name=target_name[0]) as node_remote:
+        with self.__underlay.remote(node_name=target_name) as node_remote:
             result = node_remote.execute(cmd, verbose=True)
 
         cmd_iptables = "iptables --policy FORWARD ACCEPT"
-        with self.__underlay.remote(node_name=target_name[0]) as node_remote:
+        with self.__underlay.remote(node_name=target_name) as node_remote:
             result = node_remote.execute(cmd_iptables, verbose=True)
 
         with self.__underlay.remote(
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 06e7d0b..2b06dc3 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -198,10 +198,20 @@
                 'password': settings.SSH_NODE_CREDENTIALS['password']
             }
 
-        return [
-            host(k, next(i for i in v['ipv4'] if i in pool_net))
-            for k, v in hosts.items()
-            if next(i for i in v['ipv4'] if i in pool_net)]
+        try:
+            ret = [
+                host(k, next(i for i in v['ipv4'] if i in pool_net))
+                for k, v in hosts.items()
+                if next(i for i in v['ipv4'] if i in pool_net)]
+            LOG.debug("Fetched ssh data from salt grains - {}".format(ret))
+            return ret
+        except StopIteration:
+            msg = ("Can't match nodes ip address with network cidr\n"
+                   "Managment network - {net}\n"
+                   "Host with address - {host_list}".format(
+                       net=pool_net,
+                       host_list={k: v['ipv4'] for k, v in hosts.items()}))
+            raise StopIteration(msg)
 
     def service_status(self, tgt, service):
         result = self.local(tgt=tgt, fun='service.status', args=service)
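
The `i in pool_net` membership test assumes pool_net behaves like a netaddr
IPNetwork. A sketch of the matching logic, including the case that now
produces the descriptive StopIteration:

    from netaddr import IPNetwork

    pool_net = IPNetwork('10.167.8.0/24')  # management network CIDR
    hosts = {
        'ctl01': {'ipv4': ['127.0.0.1', '10.167.8.11']},  # matches .11
        'cmp001': {'ipv4': ['192.168.0.101']},            # no match
    }
    for name, data in hosts.items():
        ip = next(i for i in data['ipv4'] if i in pool_net)
        # 'cmp001' raises StopIteration, which is re-raised with both the
        # CIDR and the full host/address map in the message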
diff --git a/tcp_tests/managers/sl_manager.py b/tcp_tests/managers/sl_manager.py
index 364f219..ba45e16 100644
--- a/tcp_tests/managers/sl_manager.py
+++ b/tcp_tests/managers/sl_manager.py
@@ -97,17 +97,25 @@
         return service_stat_dict
 
     def run_sl_functional_tests(self, node_to_run, tests_path,
-                                test_to_run, skip_tests):
+                                test_to_run, skip_tests,
+                                reruns=5, reruns_delay=60):
         target_node_name = [node_name for node_name
                             in self.__underlay.node_names()
                             if node_to_run in node_name]
         cmd = (". venv-stacklight-pytest/bin/activate;"
-               "cd {0}; "
+               "cd {tests_path}; "
                "export VOLUME_STATUS='available';"
-               "pytest -k {1} {2}".format(
-                   tests_path,
-                   "'not " + skip_tests + "'" if skip_tests else '',
-                   test_to_run))
+               "pytest {reruns} {reruns_delay} "
+               "-k {skip_tests} {test_to_run}".format(**{
+                   "tests_path": tests_path,
+                   "skip_tests": ("'not " + skip_tests + "'"
+                                  if skip_tests else ''),
+                   "test_to_run": test_to_run,
+                   "reruns": ("--reruns {}".format(reruns)
+                              if reruns > 1 else ""),
+                   "reruns_delay": ("--reruns-delay {}".format(reruns_delay)
+                                    if reruns_delay > 0 else ""),
+                   }))
 
         with self.__underlay.remote(node_name=target_node_name[0]) \
                 as node_remote:
@@ -118,18 +126,25 @@
         return result
 
     def run_sl_tests_json(self, node_to_run, tests_path,
-                          test_to_run, skip_tests):
+                          test_to_run, skip_tests, reruns=5, reruns_delay=60):
         target_node_name = [node_name for node_name
                             in self.__underlay.node_names()
                             if node_to_run in node_name]
         cmd = (". venv-stacklight-pytest/bin/activate;"
-               "cd {0}; "
+               "cd {tests_path}; "
                "export VOLUME_STATUS='available';"
                "pip install pytest-json;"
-               "pytest --json=report.json -k {1} {2}".format(
-                   tests_path,
-                   "'not " + skip_tests + "'" if skip_tests else '',
-                   test_to_run))
+               "pytest --json=report.json {reruns} {reruns_delay} "
+               "-k {skip_tests} {test_to_run}".format(**{
+                   "tests_path": tests_path,
+                   "skip_tests": ("'not " + skip_tests + "'"
+                                  if skip_tests else ''),
+                   "test_to_run": test_to_run,
+                   "reruns": ("--reruns {}".format(reruns)
+                              if reruns > 1 else ""),
+                   "reruns_delay": ("--reruns-delay {}".format(reruns_delay)
+                                    if reruns_delay > 0 else ""),
+                   }))
 
         with self.__underlay.remote(node_name=target_node_name[0]) \
                 as node_remote:
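
With the defaults (reruns=5, reruns_delay=60) the rendered command now carries
the pytest-rerunfailures flags. A sketch of the formatting with placeholder
test names and paths:

    reruns, reruns_delay = 5, 60
    skip_tests, test_to_run = 'test_dashboards', 'stacklight_tests/'
    cmd = "pytest --json=report.json {reruns} {reruns_delay} {skip} {run}".format(
        reruns="--reruns {}".format(reruns) if reruns > 1 else "",
        reruns_delay=("--reruns-delay {}".format(reruns_delay)
                      if reruns_delay > 0 else ""),
        skip="-k 'not {}'".format(skip_tests) if skip_tests else "",
        run=test_to_run)
    # -> "pytest --json=report.json --reruns 5 --reruns-delay 60
    #     -k 'not test_dashboards' stacklight_tests/"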
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index fce80cc..2c9ed55 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -175,6 +175,7 @@
                     else:
                         ssh_data = ssh
         if ssh_data is None:
+            LOG.debug("config_ssh - {}".format(self.config_ssh))
             raise Exception('Auth data for node was not found using '
                             'node_name="{}" , host="{}" , address_pool="{}"'
                             .format(node_name, host, address_pool))
diff --git a/tcp_tests/report.py b/tcp_tests/report.py
index 46f5995..03530d6 100644
--- a/tcp_tests/report.py
+++ b/tcp_tests/report.py
@@ -88,12 +88,15 @@
     LOG.info("Get runs from plan - {}".format(plan_name))
     ret = []
     plan = t_client.plan(plan_name)
-    for e in plan.entries:
-        for r in e.runs:
-            LOG.info("Run {} #{}".format(r.name, r.id))
-            if run_name is not None and r.name != run_name:
-                continue
-            ret.append(r)
+    if plan:
+        for e in plan.entries:
+            for r in e.runs:
+                LOG.info("Run {} #{}".format(r.name, r.id))
+                if run_name is not None and r.name != run_name:
+                    continue
+                ret.append(r)
+    else:
+        LOG.warning("Plan {} is empty".format(plan_name))
     return ret
 
 
@@ -110,14 +113,11 @@
 
 
 def get_results(t_client, run):
-    _statuses = ('product_failed', 'failed',
-                 'prodfailed', 'blocked')
     LOG.info("Get results for run - {}".format(run.name))
     results = t_client.results(run)
     ret = [(run.id, r) for r in results
            if r.raw_data()['status_id'] is not None and
-           r.raw_data()['defects'] is not None and
-           r.status.name.lower() in _statuses]
+           r.raw_data()['defects'] is not None]
     for r in ret:
         run_id, result = r
         test = fetch_test(result.api, result.raw_data()['test_id'], run_id)
@@ -321,14 +321,15 @@
                date=datetime.datetime.now().strftime("%a %b %d %H:%M:%S %Y"),
                table=get_md_table(table))
     plan = t_client.plan(plan_name)
-    plan.description = text
-    plan.api._post(
-        'update_plan/{}'.format(plan.id),
-        {
-            'name': plan.name,
-            'description': plan.description,
-            'milestone_id': plan.milestone.id
-        })
+    if plan:
+        plan.description = text
+        plan.api._post(
+            'update_plan/{}'.format(plan.id),
+            {
+                'name': plan.name,
+                'description': plan.description,
+                'milestone_id': plan.milestone.id
+            })
 
 
 def create_report(**kwargs):
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index d419b33..86ef693 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -1,6 +1,6 @@
 # git+git://github.com/openstack/fuel-devops.git@887368d#egg=project[postgre]   # Use this requirement for PostgreSQL
 libvirt-python>=3.5.0,<4.1.0  # LGPLv2+
-git+git://github.com/openstack/fuel-devops.git@b8c6fe45a12b091619ba43cc14ab6cf05f0cd8f0   # Use this requirement for Sqlite3, or if requirements for PostgreSQL are already installed
+git+git://github.com/openstack/fuel-devops.git@25d4cc67315132b1b27131977b2e07029b3ffbe1   # Use this requirement for Sqlite3, or if requirements for PostgreSQL are already installed
 git+git://github.com/dis-xcom/fuel-devops-driver-ironic
 paramiko
 six
@@ -13,7 +13,7 @@
 junit-xml
 jinja2>=2.1
 jira
-testrail
+testrail<=0.3.8
 functools32
 python-k8sclient==0.4.0
 salt-pepper<=0.5.3
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 25abbc9..c6a4201 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -293,24 +293,25 @@
            default='{}/mirantis/kubernetes/hyperkube-amd64:v1.8.11-9'.format(
                settings.DOCKER_REGISTRY)),
     ct.Cfg('kubernetes_calico_image', ct.String(),
-           default='{}/mirantis/projectcalico/calico/node:latest'.format(
+           default='{}/mirantis/projectcalico/calico/node:v2.6.9'.format(
                settings.DOCKER_REGISTRY)),
-    ct.Cfg('kubernetes_calicoctl_image', ct.String(),
-           default='{}/mirantis/projectcalico/calico/ctl:latest'.format(
+    ct.Cfg('kubernetes_calico_calicoctl_image', ct.String(),
+           default='{}/mirantis/projectcalico/calico/ctl:v1.6.4'.format(
                settings.DOCKER_REGISTRY)),
     ct.Cfg('kubernetes_calico_cni_image', ct.String(),
-           default='{}/mirantis/projectcalico/calico/cni:latest'.format(
+           default='{}/mirantis/projectcalico/calico/cni:v1.11.5'.format(
                settings.DOCKER_REGISTRY)),
     ct.Cfg('kubernetes_netchecker_enabled', ct.Boolean(),
            help="", default=True),
     ct.Cfg('kubernetes_netchecker_agent_image', ct.String(),
-           default='mirantis/k8s-netchecker-agent:latest'),
+           default='mirantis/k8s-netchecker-agent:v1.2.2'),
     ct.Cfg('kubernetes_netchecker_server_image', ct.String(),
-           default='mirantis/k8s-netchecker-server:latest'),
+           default='mirantis/k8s-netchecker-server:v1.2.2'),
     ct.Cfg('kubernetes_calico_policy_enabled', ct.Boolean(),
            help="", default=False),
     ct.Cfg('kubernetes_calico_policy_image', ct.String(),
-           default='calico/kube-policy-controller:v0.5.4'),
+           default='{}/mirantis/projectcalico/calico/kube-controllers:'
+                   'v1.0.4'.format(settings.DOCKER_REGISTRY)),
     ct.Cfg('kubernetes_helm_enabled', ct.Boolean(),
            help="", default=False),
     ct.Cfg('kubernetes_virtlet_enabled', ct.Boolean(),
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
index bcf5dbb..019b590 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
@@ -58,8 +58,6 @@
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml') }}
-
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 
 {{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
index 3c4ce05..69306b9 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
@@ -102,7 +102,7 @@
 - description: Install elasticsearch client
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 2, delay: 30}
   skip_fail: false
 
 - description: Install kibana client
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/common-services.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/common-services.yaml
new file mode 100644
index 0000000..d99c834
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/common-services.yaml
@@ -0,0 +1,126 @@
+{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM01 with context %}
+{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM02 with context %}
+{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM03 with context %}
+
+- description: Install glusterfs
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.service
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Setup glusterfs on primary controller
+  cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Check the gluster status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# Install support services
+- description: Install keepalived on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster and *01*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Check the OpenStack control VIP
+  cmd: |
+    OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
+    echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check the rabbitmq status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on first server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:master' state.sls galera
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on other servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:slave' state.sls galera -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check mysql status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Install haproxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' state.sls haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check haproxy status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.status haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install nginx on prx nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nginx:server' state.sls nginx
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Restart rsyslog
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.restart rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install memcached on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@memcached:server' state.sls memcached
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/lab04-physical-inventory.yaml
new file mode 100644
index 0000000..e66753d
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/lab04-physical-inventory.yaml
@@ -0,0 +1,81 @@
+nodes:
+    cfg01.contrail-nfv.local:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+    # Physical nodes
+
+    kvm01.contrail-nfv.local:
+      reclass_storage_name: infra_kvm_node01
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    kvm02.contrail-nfv.local:
+      reclass_storage_name: infra_kvm_node02
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    kvm03.contrail-nfv.local:
+      reclass_storage_name: infra_kvm_node03
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    cmp001.contrail-nfv.local:
+      reclass_storage_name: openstack_compute_node01
+      roles:
+      - openstack_compute_dpdk
+      - features_lvm_backend
+      - linux_system_codename_xenial
+      interfaces:
+        enp2s0f0:
+          role: single_vlan_ctl
+          single_address: 10.167.8.101
+        enp2s0f1:
+          role: single_mgm
+          deploy_address: 172.16.49.73
+        enp5s0f0:
+          role: single_contrail_dpdk_prv
+          tenant_address: 192.168.0.101
+          dpdk_pci: "'0000:05:00.0'"
+          dpdk_mac: '90:e2:ba:19:c2:18'
+
+    cmp002.contrail-nfv.local:
+      reclass_storage_name: openstack_compute_node02
+      roles:
+      - openstack_compute_dpdk
+      - features_lvm_backend
+      - linux_system_codename_xenial
+      interfaces:
+        enp2s0f0:
+          role: single_vlan_ctl
+          single_address: 10.167.8.102
+        enp2s0f1:
+          role: single_mgm
+          deploy_address: 172.16.49.74
+        enp5s0f0:
+          role: single_contrail_dpdk_prv
+          tenant_address: 192.168.0.102
+          dpdk_pci: "'0000:05:00.0'"
+          dpdk_mac: '00:1b:21:87:21:98'
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
new file mode 100644
index 0000000..a297622
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
@@ -0,0 +1,305 @@
+{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set PATTERN = os_env('PATTERN', 'false') %}
+{% set RUN_TEMPEST = os_env('RUN_TEMPEST', 'false') %}
+# Install OpenStack control services
+
+- description: Install glance on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@glance:server' state.sls glance -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
+- description: Restart apache due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Check apache status to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glance:server' state.sls glusterfs.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:client' state.sls keystone.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check keystone service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check glance image-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install nova on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nova:controller' state.sls nova -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check nova service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: Install cinder
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:controller' state.sls cinder -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install cinder volume
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:volume' state.sls cinder
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check cinder list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install neutron service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:server' state.sls neutron -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+# install contrail
+- description: Install Opencontrail db on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 20}
+  skip_fail: false
+
+- description: Install Opencontrail db on all nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database' state.sls opencontrail.database
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 20}
+  skip_fail: false
+
+- description: Install Opencontrail control on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Opencontrail control on all nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Opencontrail on collector
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Workaround for https://mirantis.jira.com/browse/PROD-12798
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control' service.restart 'keepalived'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# OpenContrail vrouters
+- description: Install Opencontrail client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database:id:1' state.sls 'opencontrail.client'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Opencontrail client on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' state.sls 'opencontrail.client'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+# The next four steps should be converted into one (state.sls opencontrail) with skip_fail: false.
+# They work around a bug with hugepages: apply the workaround, then reboot so
+# that only 1G hugepages are mounted, then re-apply the state.
+- description: Install Opencontrail on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' state.sls 'opencontrail'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Hack vrouter (delete the default mount point)
+  cmd: salt "cmp*" cmd.run "sed -i 's/exit 0//g' /etc/rc.local; echo 'umount /dev/hugepages; service supervisor-vrouter restart' >> /etc/rc.local; echo 'exit 0' >> /etc/rc.local"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
+
+- description: Reboot computes
+  cmd: salt --timeout=600  "cmp*" system.reboot
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
+
+- description: Install Opencontrail on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' state.sls 'opencontrail'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Remove crash files from /var/crashes/ left over from vrouter crashes
+  cmd: salt  "cmp*" cmd.run "rm -rf /var/crashes/*"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
+
+- description: Test Opencontrail
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control' cmd.run 'contrail-status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@heat:server' state.sls heat -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 10}
+  skip_fail: false
+
+- description: Deploy horizon dashboard
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@horizon:server' state.sls horizon
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Deploy nginx proxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nginx:server' state.sls nginx
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+# Install compute node
+- description: Apply formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Re-apply(as in doc) formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check IP on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+    'ip a'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 10, delay: 30}
+  skip_fail: false
+
+  # Upload cirros image
+- description: Upload cirros image on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Register image in glance
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: sync time
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq;  service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Install docker.io on ctl
+  cmd: salt "ctl01*" cmd.run 'apt-get install docker.io -y'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Enable forward policy
+  cmd: salt "ctl01*" cmd.run 'iptables --policy FORWARD ACCEPT'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Hack resolv.conf on VCP nodes for internal services access
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Reboot computes
+  cmd: salt --timeout=600  "cmp*" system.reboot
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-cookiecutter-contrail-dpdk.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-cookiecutter-contrail-dpdk.yaml
new file mode 100644
index 0000000..954323c
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-cookiecutter-contrail-dpdk.yaml
@@ -0,0 +1,200 @@
+default_context:
+  backup_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEA1Ne+K9yyvgc8Z0QO2qt5eacOQmtbwmChnptlG0e+JdIorz5l
+    ++AiduF5KrPt7rlx5Ow8XHiBUqcrAwFiVGzGYOv0YJhBgZzpwKPcSAP8jo56d7ZB
+    RNMZAB3kgjODvWa7DIp6M5cXZ2FmCeFW72HbaZd3B0lAUiqm9si9gZBkircaYdbu
+    DqeAngHiNVbWjxHh9hZ8cWidk++98GYH5HbLJ7L9l+2rFGyW1EwtiMb6SyipDAdu
+    szEuemU3coJebArzm3Is6lmatcVTe5M6DTgK7IP7cd2DqqYa5tYDN55SwAd93wUc
+    oyCbL5DmAGNcCMbFE/CGI0UYBrx7XXatgskj9wIDAQABAoIBAGv43fImTU51IUFJ
+    lzd60W7TPjqXd78Ngi+RqSLDrERHbngn8VhrBVDFZNAy4rq9vHdjF+PZBdHGF924
+    cAdf/urgB+KQmnqD/VjKR08JJq+yu5HLFSUy6XXTtD4Qn/4PBUiBXyiYtzisgjT0
+    6UMao1HXknxRvp1wIa8Deo8ljruG5n/1ZX7x7eqSud5xgKz4MlXVwxgoA373R0mg
+    m8S2p7b3wS2vhWpf3oU3Y47q1Gdd5aOYblqkw3yvkUvBoSW9iwoNc5vLB6hHHs36
+    gMO5bANLdhbrf2eRULUsbRwuewiHhu5GaWSF0/FKcf1V7OoBcaO767ZEpnhMtFag
+    Rb9TJtECgYEA87ojKcwmbUUBqu4MPNqc5kIYMn3yEV/q+8uMM8cK79iumG9htrcF
+    E8WhwZBBj7BUzO57LmsrADVu1FMpHFTq1kVpg7flldz59EcaUYL0dvGbRyygrVNO
+    7/rxWPZfaXgxJhyzp2gODFhH4SA+5cZALDCtmgArCquGhbh5xA70sO0CgYEA3496
+    b3KK1TAflUH/n3YEaXn+rp+tOLnLovOGPXlTzEvVWJCxoXcwpiV1fBuleENto9S/
+    90KQVhVh9XJFvSy+AiRlDNvKX3fEJ/x7YdDrWfoU2KiWpkydxrE1i6ZSRomPyFZO
+    Ik7eMXXEaYS99I8EPGo7w/i3m4J1DMP78Fbqn/MCgYANu+ZxW4Sq0aGnbSSZZURE
+    IWNVrQ0v93S5XZ336PZvz4j/I/+gmS2bEJ7t1mArZadRqPqeAgH5UAl/w3PkmdBl
+    5KFuf7VbPYmEP2F3SGbYMQMr/pLLUY94LG7fMXrs6Y3zdNiWzWFFRtA+GmAQ+Jvz
+    IYcWz32da074SA3sg188fQKBgQCCAE80S6lL+2YCR5S8R8adB2IAbb4vRGuUYIRp
+    bwo5vMddbxa8TDEwDIxbFUCNxLgXEvpmcIC6bki+kCrZrRD48e0JIy51gZHBpuKg
+    qPqTIgfJTY/9OIRvLFF02czyU8AWwYlCDhbLMC59JcHIWvodn7ENbq5mceBbAgSZ
+    aBGb3QKBgQCyzxau9wyntLPukNJMOSAPnvssBp3Op1mF+eOy9PnwqhlJMPeQnraO
+    qeM3zYUcIsYUbEBE394mmbH4JexaqWLCU4p7tcMK15ALWnlk6QZqBwZqSNYYDvXK
+    ahdK0isC35cOm+IFYBhLLJfjONZVzT1qj9cD+bxEIvSberA/urkLmw==
+    -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDU174r3LK+BzxnRA7aq3l5pw5Ca1vCYKGem2UbR74l0iivPmX74CJ24Xkqs+3uuXHk7DxceIFSpysDAWJUbMZg6/RgmEGBnOnAo9xIA/yOjnp3tkFE0xkAHeSCM4O9ZrsMinozlxdnYWYJ4VbvYdtpl3cHSUBSKqb2yL2BkGSKtxph1u4Op4CeAeI1VtaPEeH2FnxxaJ2T773wZgfkdssnsv2X7asUbJbUTC2IxvpLKKkMB26zMS56ZTdygl5sCvObcizqWZq1xVN7kzoNOArsg/tx3YOqphrm1gM3nlLAB33fBRyjIJsvkOYAY1wIxsUT8IYjRRgGvHtddq2CySP3
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_enabled: 'False'
+  cluster_domain: contrail-nfv.local
+  cluster_name: cookied-bm-mcp-ocata-contrail-nfv
+  compute_bond_mode: active-backup
+  compute_padding_with_zeros: 'True'
+  compute_primary_first_nic: enp5s0f0
+  compute_primary_second_nic: enp5s0f1
+  context_seed: 5Y5caLTPMlq2bA5VpY8E1vXDt4ajJ6t4pVtClPXn0WCGNtM7GHw4qLYZknH2R1pt
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.167.8.0/24
+  control_vlan: '2422'
+  cookiecutter_template_branch: ''
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  deploy_network_gateway: 172.16.49.126
+  deploy_network_netmask: 255.255.255.192
+  deploy_network_subnet: 172.16.49.64/26
+  deployment_type: physical
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.208.44
+  email_address: sgudz@mirantis.com
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.8.241
+  infra_kvm01_deploy_address: 172.16.49.67
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.8.242
+  infra_kvm02_deploy_address: 172.16.49.68
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.8.243
+  infra_kvm03_deploy_address: 172.16.49.69
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.167.8.240
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 172.16.49.66
+  maas_hostname: cfg01
+  mcp_common_scripts_branch: ''
+  mcp_version: proposed
+  offline_deployment: 'False'
+  opencontrail_analytics_address: 10.167.8.30
+  opencontrail_analytics_hostname: nal
+  opencontrail_analytics_node01_address: 10.167.8.31
+  opencontrail_analytics_node01_hostname: nal01
+  opencontrail_analytics_node02_address: 10.167.8.32
+  opencontrail_analytics_node02_hostname: nal02
+  opencontrail_analytics_node03_address: 10.167.8.33
+  opencontrail_analytics_node03_hostname: nal03
+  opencontrail_compute_iface_mask: '24'
+  opencontrail_control_address: 10.167.8.20
+  opencontrail_control_hostname: ntw
+  opencontrail_control_node01_address: 10.167.8.21
+  opencontrail_control_node01_hostname: ntw01
+  opencontrail_control_node02_address: 10.167.8.22
+  opencontrail_control_node02_hostname: ntw02
+  opencontrail_control_node03_address: 10.167.8.23
+  opencontrail_control_node03_hostname: ntw03
+  opencontrail_enabled: 'True'
+  opencontrail_router01_address: 10.167.8.101
+  opencontrail_router01_hostname: rtr01
+  opencontrail_router02_address: 10.167.8.102
+  opencontrail_router02_hostname: rtr02
+  openstack_benchmark_node01_address: 10.167.8.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '2'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 10.167.8
+  openstack_compute_rack01_tenant_subnet: 192.168.0
+  openstack_control_address: 10.167.8.10
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.167.8.11
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.167.8.12
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.167.8.13
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.167.8.50
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: 10.167.8.51
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: 10.167.8.52
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: 10.167.8.53
+  openstack_database_node03_hostname: dbs03
+  openstack_enabled: 'True'
+  openstack_message_queue_address: 10.167.8.40
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: 10.167.8.41
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: 10.167.8.42
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: 10.167.8.43
+  openstack_message_queue_node03_hostname: msg03
+  openstack_network_engine: opencontrail
+  openstack_neutron_bgp_vpn: 'False'
+  openstack_neutron_bgp_vpn_driver: bagpipe
+  openstack_nfv_dpdk_enabled: 'True'
+  openstack_nfv_sriov_enabled: 'True'
+  openstack_nfv_sriov_network: physnet1
+  openstack_nfv_sriov_numvfs: '7'
+  openstack_nfv_sriov_pf_nic: enp5s0f1
+  openstack_nova_compute_hugepages_count: '16'
+  openstack_nova_compute_nfv_req_enabled: 'True'
+  openstack_nova_cpu_pinning: 6,7,8,9,10,11
+  openstack_proxy_address: 10.167.8.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.167.8.81
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.167.8.82
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 10.167.8.19
+  openstack_version: ocata
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  oss_notification_smtp_use_tls: 'False'
+  oss_pushkin_email_sender_password: password
+  oss_pushkin_smtp_host: 127.0.0.1
+  oss_pushkin_smtp_port: '587'
+  oss_webhook_app_id: '24'
+  oss_webhook_login_id: '13'
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: 3SjmVdpfyQfhBVhaTJ3t39EWxBWeUzMr
+  salt_api_password_hash: $6$XLFCxibF$HqQC55s/Hl78vPrrpM8KJOfjXboakdS6ctgsEhO/DVWCN3ecxrg/TaLh0l2ieS6ukdBDurskX73FOIqz2Fs53/
+  salt_master_address: 172.16.49.66
+  salt_master_hostname: cfg01
+  salt_master_management_address: 172.16.49.66
+  shared_reclass_branch: ''
+  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  stacklight_enabled: 'True'
+  stacklight_log_address: 10.167.8.60
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 10.167.8.61
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 10.167.8.62
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 10.167.8.63
+  stacklight_log_node03_hostname: log03
+  stacklight_long_term_storage_type: influxdb
+  stacklight_monitor_address: 10.167.8.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 10.167.8.71
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 10.167.8.72
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 10.167.8.73
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_telemetry_address: 10.167.8.85
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 10.167.8.86
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 10.167.8.87
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 10.167.8.88
+  stacklight_telemetry_node03_hostname: mtr03
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 192.168.0.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 192.168.0.0/24
+  tenant_vlan: '2423'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'True'
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-environment.yaml
new file mode 100644
index 0000000..c568b78
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-environment.yaml
@@ -0,0 +1,236 @@
+nodes:
+    # Virtual Control Plane nodes
+
+    ctl01.contrail-nfv.local:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - openstack_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ctl02.contrail-nfv.local:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ctl03.contrail-nfv.local:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    dbs01.contrail-nfv.local:
+      reclass_storage_name: openstack_database_node01
+      roles:
+      - openstack_database_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    dbs02.contrail-nfv.local:
+      reclass_storage_name: openstack_database_node02
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    dbs03.contrail-nfv.local:
+      reclass_storage_name: openstack_database_node03
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    msg01.contrail-nfv.local:
+      reclass_storage_name: openstack_message_queue_node01
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    msg02.contrail-nfv.local:
+      reclass_storage_name: openstack_message_queue_node02
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    msg03.contrail-nfv.local:
+      reclass_storage_name: openstack_message_queue_node03
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    prx01.contrail-nfv.local:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    prx02.contrail-nfv.local:
+      reclass_storage_name: openstack_proxy_node02
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mon01.contrail-nfv.local:
+      reclass_storage_name: stacklight_server_node01
+      roles:
+      - stacklightv2_server_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mon02.contrail-nfv.local:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mon03.contrail-nfv.local:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    nal01.contrail-nfv.local:
+      reclass_storage_name: opencontrail_analytics_node01
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_trusty
+      interfaces:
+        eth1:
+          role: single_ctl
+
+    nal02.contrail-nfv.local:
+      reclass_storage_name: opencontrail_analytics_node02
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_trusty
+      interfaces:
+        eth1:
+          role: single_ctl
+
+    nal03.contrail-nfv.local:
+      reclass_storage_name: opencontrail_analytics_node03
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_trusty
+      interfaces:
+        eth1:
+          role: single_ctl
+
+    ntw01.contrail-nfv.local:
+      reclass_storage_name: opencontrail_control_node01
+      roles:
+      - opencontrail_control
+      - linux_system_codename_trusty
+      interfaces:
+        eth1:
+          role: single_ctl
+
+    ntw02.contrail-nfv.local:
+      reclass_storage_name: opencontrail_control_node02
+      roles:
+      - opencontrail_control
+      - linux_system_codename_trusty
+      interfaces:
+        eth1:
+          role: single_ctl
+
+    ntw03.contrail-nfv.local:
+      reclass_storage_name: opencontrail_control_node03
+      roles:
+      - opencontrail_control
+      - linux_system_codename_trusty
+      interfaces:
+        eth1:
+          role: single_ctl
+
+    mtr01.contrail-nfv.local:
+      reclass_storage_name: stacklight_telemetry_node01
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mtr02.contrail-nfv.local:
+      reclass_storage_name: stacklight_telemetry_node02
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mtr03.contrail-nfv.local:
+      reclass_storage_name: stacklight_telemetry_node03
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    log01.contrail-nfv.local:
+      reclass_storage_name: stacklight_log_node01
+      roles:
+      - stacklight_log_leader_v2
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    log02.contrail-nfv.local:
+      reclass_storage_name: stacklight_log_node02
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    log03.contrail-nfv.local:
+      reclass_storage_name: stacklight_log_node03
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
new file mode 100644
index 0000000..8b327e0
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
@@ -0,0 +1,163 @@
+{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM01 with context %}
+{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM02 with context %}
+{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM03 with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-mcp-ocata-contrail-nfv') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail-dpdk.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+- description: "Workaround for rack01 compute generator"
+  cmd: |
+    set -e;
+    # Remove rack01 key
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    # Add openstack_compute_node definition from system
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
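
The reclass-tools invocations in this step (and in the workarounds below) all share one shape; sketched generically, with placeholder arguments inferred from the calls in this file rather than from the tool's documentation:

    # Remove a dotted key path from a reclass YAML file
    reclass-tools del-key <dotted.key.path> <path/to/file.yml>
    # Add a key; with --merge, merge the value into an existing list such as 'classes'
    reclass-tools add-key <dotted.key.path> <value> <path/to/file.yml> [--merge]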
+
+- description: "WR for changing image to proposed"
+  cmd: |
+    set -e;
+    # Switch the salt_control images to the 'proposed' builds
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+    reclass-tools add-key parameters._param.salt_control_trusty_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-14-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+- description: "WR for dpdk pci to be in correct quotes"
+  cmd: |
+    set -e;
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools add-key parameters._param.compute_vrouter_dpdk_pci '0000:05:00.0' /srv/salt/reclass/nodes/_generated/cmp001.{{ DOMAIN_NAME }}.yml;
+    reclass-tools add-key parameters._param.compute_vrouter_dpdk_pci '0000:05:00.0' /srv/salt/reclass/nodes/_generated/cmp002.{{ DOMAIN_NAME }}.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+- description: "Workaround for PROD-14060"
+  cmd: |
+    set -e;
+    # Add tenant and single addresses for computes
+    salt-call reclass.cluster_meta_set deploy_address 172.16.49.73 /srv/salt/reclass/nodes/_generated/cmp001.{{ DOMAIN_NAME }}.yml
+    salt-call reclass.cluster_meta_set tenant_address 192.168.0.101 /srv/salt/reclass/nodes/_generated/cmp001.{{ DOMAIN_NAME }}.yml
+    salt-call reclass.cluster_meta_set single_address 10.167.8.101 /srv/salt/reclass/nodes/_generated/cmp001.{{ DOMAIN_NAME }}.yml
+    salt-call reclass.cluster_meta_set deploy_address 172.16.49.74 /srv/salt/reclass/nodes/_generated/cmp002.{{ DOMAIN_NAME }}.yml
+    salt-call reclass.cluster_meta_set tenant_address 192.168.0.102 /srv/salt/reclass/nodes/_generated/cmp002.{{ DOMAIN_NAME }}.yml
+    salt-call reclass.cluster_meta_set single_address 10.167.8.102 /srv/salt/reclass/nodes/_generated/cmp002.{{ DOMAIN_NAME }}.yml
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
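
All six salt-call lines above are instances of one pattern; its general form, as used here (the argument roles are inferred from these calls, not from the module's documentation):

    salt-call reclass.cluster_meta_set <parameter-name> <value> <generated-node-yaml>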
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Temporary WR for correct bridge name according to environment templates
+  cmd: |
+    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "WR for correct opencontrail_compute_iface value. Cookiecutter context doesn't have such parameter"
+  cmd: |
+    set -e;
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools add-key parameters._param.opencontrail_compute_iface 'enp5s0f0' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/opencontrail/init.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+########################################
+# Spin up Control Plane VMs on KVM nodes
+########################################
+
+- description: Execute 'libvirt' states to create necessary libvirt networks
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Create VMs for control plane
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
+- description: '*Workaround* for waiting for the control-plane VMs in salt-key (instead of sleep)'
+  cmd: |
+    salt-key -l acc| sort > /tmp/current_keys.txt &&
+    salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 20, delay: 30}
+  skip_fail: false
+
+#########################################
+# Configure all running salt minion nodes
+#########################################
+
+- description: Hack resolv.conf on VCP nodes for internal services access
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Refresh pillars on all minions
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all salt resources
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Show reclass-salt --top for generated nodes
+  cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml
new file mode 100644
index 0000000..87eb34c
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml
@@ -0,0 +1,239 @@
+{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install docker swarm
+- description: Configure docker service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install docker swarm on master node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Update mine
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Refresh modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Rerun swarm on slaves for proper token population
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure slave nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: List registered Docker swarm nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
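
For orientation, the swarm bootstrap orchestrated by the states above corresponds roughly to the following raw Docker commands; this is an illustrative sketch only, since the real addresses and join tokens are distributed through salt grains and the mine:

    # On the designated master node:
    docker swarm init --advertise-addr <master-ip>
    # Print the manager join token that the mine later hands to the other nodes:
    docker swarm join-token manager -q
    # On each remaining node, joined one at a time (hence the '-b 1' batch above):
    docker swarm join --token <token> <master-ip>:2377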
+
+- description: Install keepalived on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'mon*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the VIP on mon nodes
+  cmd: |
+    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# Install slv2 (StackLight v2) infra
+# Launch containers
+- description: Launch prometheus containers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Check docker ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Install telegraf
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install elasticsearch server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install kibana server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install elasticsearch client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Install kibana client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check influxdb
+  cmd: |
+    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+    if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
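
This step, together with the Prometheus LTS, fluentd and heka ceilometer steps below, reuses one presence-check idiom: ping the pillar-based target and apply the state only when at least one minion responds. A minimal sketch with placeholders:

    SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@<pillar:path>' test.ping 1>/dev/null 2>&1 && echo true`
    if [[ "$SERVICE" == "true" ]]; then
        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@<pillar:path>' state.sls <state>
    fi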
+
+# Install Prometheus LTS (optional, if set in the model)
+- description: Prometheus LTS (optional, if set in the model)
+  cmd: |
+    PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
+    if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+# Install services for the log collection
+- description: Configure fluentd
+  cmd: |
+    FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Fluentd service presence: ${FLUENTD_SERVICE}";
+    if [[ "$FLUENTD_SERVICE" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
+    else
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+# Install heka ceilometer collector
+- description: Install heka ceilometer collector if it exists
+  cmd: |
+    CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Ceilometer service presence: ${CEILO}";
+    if [[ "$CEILO" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+# Collect grains needed to configure the services
+
+- description: Get grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Sync modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Update mine
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 15}
+  skip_fail: false
+
+# Configure the services running in Docker Swarm
+- description: Configure prometheus in docker swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure Remote Collector in Docker Swarm for OpenStack deployments
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install sphinx
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+
+#- description: Install prometheus alertmanager
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
+
+#- description: run docker state
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
+#
+#- description: docker ps
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Run salt minion to create cert files
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--meta-data.yaml
new file mode 100644
index 0000000..a594a53
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..7677268
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml
@@ -0,0 +1,108 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   #   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifdown ens3
+   - sudo ip r d default || true  # remove existing default route to get it from dhcp
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   #   - echo "Preparing base OS"
+
+   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+   # Configure Ubuntu mirrors
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   #   - apt-get clean
+   #   - apt-get update
+
+   # Install common packages
+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   # Install salt-minion and stop it until it is configured
+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+   ########################################################
+   # Node is ready, allow SSH access
+   #   - echo "Allow SSH access ..."
+   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+   # Install common packages
+   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
+
+   # Use sshuttle to allow SSH access to the model-related control network 10.167.8.0/24 on baremetal/VM nodes from cfg01
+   - sshuttle -r {{ ETH0_IP_ADDRESS_KVM01 }} 10.167.8.0/24 -D
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data1604-hwe.yaml
new file mode 100644
index 0000000..106c3d5
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data1604-hwe.yaml
@@ -0,0 +1,99 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   #   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup {interface_name}
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   #   - echo "Preparing base OS"
+   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+   # Configure Ubuntu mirrors
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   #   - apt-get clean
+   #   - eatmydata apt-get update && apt-get -y upgrade
+
+   # Install common packages
+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   # Install salt-minion and stop it until it is configured
+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+   # Install latest kernel
+   #   - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
+
+   ########################################################
+   # Node is ready, allow SSH access
+   #- echo "Allow SSH access ..."
+   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   #   - reboot
+   ########################################################
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          # The loopback network interface
+          auto lo
+          iface lo inet loopback
+          auto {interface_name}
+          iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data1604.yaml
new file mode 100644
index 0000000..915981e
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data1604.yaml
@@ -0,0 +1,95 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup {interface_name}
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   #   - echo "Preparing base OS"
+   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+   # Configure Ubuntu mirrors
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   #   - apt-get clean
+   #   - eatmydata apt-get update && apt-get -y upgrade
+
+   # Install common packages
+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   # Install salt-minion and stop it until it is configured
+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+   ########################################################
+   # Node is ready, allow SSH access
+   #   - echo "Allow SSH access ..."
+   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          # The loopback network interface
+          auto lo
+          iface lo inet loopback
+          auto {interface_name}
+          iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml
new file mode 100644
index 0000000..be97ae6
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml
@@ -0,0 +1,405 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-mcp-ocata-contrail-nfv') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
+{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
+{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.68') %}
+{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.69') %}
+{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.73') %}
+{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
+
+{% import 'cookied-bm-mcp-ocata-contrail-nfv/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
+{% import 'cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_HWE with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
+ - &cloudinit_user_data_hwe {{ CLOUDINIT_USER_DATA_HWE }}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'cookied-bm-mcp-ocata-contrail-nfv_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
+        params:
+          ip_reserved:
+            gateway: +62
+            l2_network_device: +61
+            default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+            default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+            default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+            default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+            default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+            default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+            virtual_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+            virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+            virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+            virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+            virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+            virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: -2
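
The ip_reserved entries above are offsets relative to each pool's network; assuming fuel-devops counts positive offsets from the network address and negative ones from its end, they resolve as in this quick Python check:

    import ipaddress

    admin = ipaddress.ip_network('172.16.49.64/26')
    print(admin[62])     # gateway '+62'            -> 172.16.49.126
    print(admin[61])     # l2_network_device '+61'  -> 172.16.49.125

    external = ipaddress.ip_network('172.17.42.192/26')
    print(external[-2])  # l2_network_device '-2'   -> 172.17.42.254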
+
+    groups:
+      - name: virtual
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+        network_pools:
+          admin: admin-pool01
+
+        l2_network_devices:
+          # Ironic management interface
+          admin:
+            address_pool: admin-pool01
+            dhcp: false
+            parent_iface:
+              phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+
+      - name: default
+        driver:
+          name: devops_driver_ironic
+          params:
+            os_auth_token: fake-token
+            ironic_url: !os_env IRONIC_URL  # URL that will be used by fuel-devops
+                                            # to access Ironic API
+            # Agent URL that is accessible from the deploying node when nodes
+            # are bootstrapped with PXE. Usually the PXE/provision network address is used.
+            agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
+            agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
+
+        network_pools:
+          admin: admin-pool01
+
+        nodes:
+          - name: {{ HOSTNAME_KVM01 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM01  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # The same as for the agent URL: this is the URL of the image used to
+                  # deploy the node. It must also be accessible from the deploying node
+                  # when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
+
+              network_config:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+          - name: {{ HOSTNAME_KVM02 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM02  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # The same as for the agent URL: this is the URL of the image used to
+                  # deploy the node. It must also be accessible from the deploying node
+                  # when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
+
+              network_config:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+          - name: {{ HOSTNAME_KVM03 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM03  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # The same as for the agent URL: this is the URL of the image used to
+                  # deploy the node. It must also be accessible from the deploying node
+                  # when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
+
+              network_config:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+
+          - name: {{ HOSTNAME_CMP001 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_CMP001  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp2s0f1  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # The same as for the agent URL: this is the URL of the image used to
+                  # deploy the node. It must also be accessible from the deploying node
+                  # when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_hwe
+
+              interfaces:
+                - label: enp2s0f0
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
+                - label: enp2s0f1
+                  l2_network_device: admin
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
+                - label: enp5s0f0
+                  mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
+                  features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+                - label: enp5s0f1
+                  mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
+                  features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+
+              network_config:
+                enp2s0f1:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp5s0f0
+
+          - name: {{ HOSTNAME_CMP002 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_CMP002  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp2s0f1  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # The same as for the agent URL: this is the URL of the image used to
+                  # deploy the node. It must also be accessible from the deploying node
+                  # when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_hwe
+
+              interfaces:
+                - label: enp2s0f0
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
+                - label: enp2s0f1
+                  l2_network_device: admin
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
+                - label: enp5s0f0
+                  mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
+                  features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+                - label: enp5s0f1
+                  mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
+                  features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+
+              network_config:
+                enp2s0f1:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp5s0f0
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
index c12344c..aaaa296 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
@@ -109,7 +109,7 @@
 - description: Install elasticsearch client
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 2, delay: 30}
   skip_fail: false
 
 - description: Install kibana client
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
index 596c512..e47f36c 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
@@ -245,16 +245,10 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-#- description:  Allow all tcp
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-#    '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 30}
-#  skip_fail: false
-
-#- description:  Allow all icmp
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-#    '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 30}
-#  skip_fail: false
\ No newline at end of file
+- description: "Temporary WR: copy the keystonercv3 credentials from ctl01 to gtw01"
+  cmd: |
+    rc=`salt "ctl01*" cmd.run 'cat /root/keystonercv3' | grep export`;
+    salt 'gtw01*' cmd.run "echo $rc > /root/keystonercv3";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
index 39363be..2b693ab 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
@@ -55,9 +55,9 @@
           deploy_address: 172.16.49.3
         enp3s0f1:
           role: single_vlan_ctl
-          tenant_address: 10.167.12.105
           single_address: 10.167.11.105
         enp5s0f0:
+          tenant_address: 10.167.12.105
           role: single_ovs_dpdk_prv
           dpdk_pci: "0000:05:00.0"
 
@@ -75,9 +75,9 @@
           deploy_address: 172.16.49.31
         enp3s0f1:
           role: single_vlan_ctl
-          tenant_address: 10.167.12.106
           single_address: 10.167.11.106
         enp5s0f0:
+          tenant_address: 10.167.12.106
           role: single_ovs_dpdk_prv
           dpdk_pci: "0000:05:00.0"
 
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
index 7b3e2f4..e8eb622 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
@@ -146,3 +146,14 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: true
+
+- description: "Temporary WR: distribute the Salt master SSH public key to all minions"
+  cmd: |
+    ssh-keygen -y -f /root/.ssh/id_rsa > /root/.ssh/id_rsa.pub;
+    pub_key=`cat /root/.ssh/id_rsa.pub`;
+    salt '*' cmd.run "echo $pub_key >> /root/.ssh/authorized_keys";
+    salt '*' cmd.run "service sshd restart";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml
index d00841d..3f4f128 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml
@@ -56,9 +56,6 @@
    # Install common packages
    - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
 
-   # Use sshuttle to allow SSH access to the model-related control network 10.167.4.0/24 on baremetal/VM nodes from cfg01
-   - sshuttle -r {{ ETH0_IP_ADDRESS_KVM01 }} 10.167.4.0/24 -D
-
    ########################################################
    # Node is ready, allow SSH access
    #- echo "Allow SSH access ..."
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory.yaml
index d17abc9..2eb036b 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory.yaml
@@ -9,45 +9,6 @@
           role: single_dhcp
     # Physical nodes
 
-    cid01.ocata-cicd.local:
-      reclass_storage_name: cicd_control_node01
-      roles:
-      - cicd_control_leader
-      - linux_system_codename_xenial
-      interfaces:
-        enp2s0f0:
-          role: single_mgm
-          deploy_address: 172.16.49.70
-        enp2s0f1:
-          role: single_vlan_ctl
-          single_address: 10.167.8.91
-
-    cid02.ocata-cicd.local:
-      reclass_storage_name: cicd_control_node02
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        enp2s0f0:
-          role: single_mgm
-          deploy_address: 172.16.49.71
-        enp2s0f1:
-          role: single_vlan_ctl
-          single_address: 10.167.8.92
-
-    cid03.ocata-cicd.local:
-      reclass_storage_name: cicd_control_node03
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        enp2s0f0:
-          role: single_mgm
-          deploy_address: 172.16.49.72
-        enp2s0f1:
-          role: single_vlan_ctl
-          single_address: 10.167.8.93
-
     kvm01.ocata-cicd.local:
       reclass_storage_name: infra_kvm_node01
       roles:
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-environment.yaml
index 9e8ef5d..1411196 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-environment.yaml
@@ -1,6 +1,33 @@
 nodes:
     # Virtual Control Plane nodes
 
+    cid01.ocata-cicd.local:
+      reclass_storage_name: cicd_control_node01
+      roles:
+      - cicd_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    cid02.ocata-cicd.local:
+      reclass_storage_name: cicd_control_node02
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    cid03.ocata-cicd.local:
+      reclass_storage_name: cicd_control_node03
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
     ctl01.ocata-cicd.local:
       reclass_storage_name: openstack_control_node01
       roles:
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
index 40e8d68..0d2bdda 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
@@ -57,14 +57,6 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Remove cicd nodes from VCP, because we have baremetal cicd nodes
-  cmd: |
-    sed -i 's/\-\ system\.salt\.control\.cluster\.cicd\_control\_cluster//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
 - description: "Workaround for PROD-16973"
   cmd: |
     set -e;
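Note on the removal above: together with the inventory hunks, it reverses the earlier "baremetal cicd" arrangement. cid01–cid03 are defined as virtual control plane nodes again, so the cicd_control_cluster class must stay in kvm.yml for the VCP to spawn them, and the sed that stripped it is obsolete. Illustrative (hypothetical) shape of the class list that sed used to rewrite:

    classes:
    - system.salt.control.cluster.infra_kvm_cluster        # hypothetical sibling entry
    - system.salt.control.cluster.cicd_control_cluster     # the class the sed removed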
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
index 90c6227..eb56414 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
@@ -7,9 +7,6 @@
 {% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
@@ -19,9 +16,6 @@
 {% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
 {% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.68') %}
 {% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.69') %}
-{% set ETH0_IP_ADDRESS_CID01 = os_env('ETH0_IP_ADDRESS_CID01', '172.16.49.70') %}
-{% set ETH0_IP_ADDRESS_CID02 = os_env('ETH0_IP_ADDRESS_CID02', '172.16.49.71') %}
-{% set ETH0_IP_ADDRESS_CID03 = os_env('ETH0_IP_ADDRESS_CID03', '172.16.49.72') %}
 {% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.73') %}
 {% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
 # {% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.167.140') %}
@@ -55,9 +49,6 @@
             default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
             default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
             default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
-            default_{{ HOSTNAME_CID01 }}: {{ ETH0_IP_ADDRESS_CID01 }}
-            default_{{ HOSTNAME_CID02 }}: {{ ETH0_IP_ADDRESS_CID02 }}
-            default_{{ HOSTNAME_CID03 }}: {{ ETH0_IP_ADDRESS_CID03 }}
             default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
             default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
             default_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
@@ -67,9 +58,6 @@
             virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
             virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
             virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
-            virtual_{{ HOSTNAME_CID01 }}: {{ ETH0_IP_ADDRESS_CID01 }}
-            virtual_{{ HOSTNAME_CID02 }}: {{ ETH0_IP_ADDRESS_CID02 }}
-            virtual_{{ HOSTNAME_CID03 }}: {{ ETH0_IP_ADDRESS_CID03 }}
             virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
             virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
             # virtual_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
@@ -377,147 +365,6 @@
                   parents:
                    - enp9s0f1
 
-          - name: {{ HOSTNAME_CID01 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CID01  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # The same as for agent URL, here is an URL to the image that should be
-                  # used for deploy the node. It should also be accessible from deploying
-                  # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: enp2s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CID01
-                - label: enp2s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CID01
-
-              network_config:
-                enp2s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp2s0f1
-
-          - name: {{ HOSTNAME_CID02}}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CID02  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # The same as for agent URL, here is an URL to the image that should be
-                  # used for deploy the node. It should also be accessible from deploying
-                  # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: enp2s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CID02
-                - label: enp2s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CID02
-
-              network_config:
-                enp2s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp2s0f1
-
-          - name: {{ HOSTNAME_CID03 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CID03  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # The same as for agent URL, here is an URL to the image that should be
-                  # used for deploy the node. It should also be accessible from deploying
-                  # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: enp2s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CID03
-                - label: enp2s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CID03
-
-              network_config:
-                enp2s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp2s0f1
-
           - name: {{ HOSTNAME_CMP001 }}
             role: salt_minion
             params:
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/sl.yaml
index 46cd024..06ff674 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/sl.yaml
@@ -102,7 +102,7 @@
 - description: Install elasticsearch client
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 2, delay: 30}
   skip_fail: false
 
 - description: Install kibana client
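In these step files, retry re-runs a failed step: bumping it to {count: 2, delay: 30} gives elasticsearch.client a second attempt after 30 seconds instead of failing the whole run on one transient salt error. The same bump is applied to every sl.yaml below. General step shape used throughout:

    - description: <what the step does>
      cmd: <shell command executed on the node>
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 2, delay: 30}  # up to 2 attempts, 30 s apart
      skip_fail: false              # exhausting retries still fails the deploy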
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
index 08f443e..7225c6d 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
@@ -162,7 +162,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
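The nat→route switch (repeated across the underlay templates below) changes the libvirt forward mode of the external network: nat masquerades guest traffic behind the host address, while route forwards it unmodified, so guest addresses stay visible and routable from the host side. Sketch of the network block being toggled, with field names as in these underlay templates:

    external:
      address_pool: external-pool01
      dhcp: true
      forward:
        mode: route   # routed instead of NAT-masqueraded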
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
index 2a86269..bd34cc8 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
@@ -25,8 +25,6 @@
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml') }}
-
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
index 03e3153..7bc48a4 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
@@ -110,7 +110,7 @@
 - description: Install elasticsearch client
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 2, delay: 30}
   skip_fail: false
 
 - description: Install kibana client
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
index fbbda4b..77b2573 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
@@ -212,7 +212,7 @@
             address_pool: external-pool01
             dhcp: false
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
index 0a90afa..c71f82d 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
@@ -109,7 +109,7 @@
 - description: Install elasticsearch client
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 2, delay: 30}
   skip_fail: false
 
 - description: Install kibana client
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
index cffa424..502997f 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
@@ -328,7 +328,7 @@
             address_pool: external-pool01
             dhcp: false
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml
index 1097d70..decee43 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml
@@ -178,7 +178,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
index 38c0742..5d00f1b 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
@@ -20,8 +20,6 @@
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 
 {{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
index 3e5f7fb..b34225f 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
@@ -102,7 +102,7 @@
 - description: Install elasticsearch client
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 2, delay: 30}
   skip_fail: false
 
 - description: Install kibana client
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
index 8b6c716..4c8efd8 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
@@ -152,7 +152,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml
index d1dfb9e..50c56e1 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml
@@ -20,8 +20,6 @@
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 
 {{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/sl.yaml
index b75dfe9..9f02e20 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/sl.yaml
@@ -109,7 +109,7 @@
 - description: Install elasticsearch client
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 2, delay: 30}
   skip_fail: false
 
 - description: Install kibana client
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
index 9aeabca..d8ed2c1 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
@@ -192,7 +192,7 @@
             address_pool: external-pool01
             dhcp: false
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml
index 9a39b90..beb16de 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml
@@ -20,8 +20,6 @@
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 
 {{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/sl.yaml
index e237aa3..a23b74c 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/sl.yaml
@@ -111,7 +111,7 @@
 - description: Install elasticsearch client
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 2, delay: 30}
   skip_fail: false
 
 - description: Install kibana client
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
index fea38c9..89cf22b 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
@@ -182,7 +182,7 @@
             address_pool: external-pool01
             dhcp: false
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
index 35bb116..1f2017c 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
@@ -11,7 +11,8 @@
 {%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-environment.yaml'] %}
 {%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2416') %}
 {%- set TENANT_VLAN = os_env('TENANT_VLAN', '2417') %}
-
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE+ "/saltstack-2016.3/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") %}
 {% import 'shared-salt.yaml' as SHARED with context %}
 
 {{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
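os_env(name, default) resolves an environment variable with a fallback, so the two added lines let a CI job pin the salt package source per run: REPOSITORY_SUITE selects the mirror suite (defaulting to 'testing') and SALT_REPOSITORY assembles the full apt line from it. With the defaults, the rendered repository line is:

    deb [arch=amd64] http://mirror.mirantis.com/testing/saltstack-2016.3/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main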
diff --git a/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml b/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
index 23d5e37..b231ced 100644
--- a/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
+++ b/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
@@ -83,7 +83,7 @@
    - apt-get install -y docker.io gzip uuid-runtime cpio findutils grep gnupg make
    - service docker start
    - git clone https://git.openstack.org/openstack/ironic-python-agent /tmp/ironic-python-agent
-   - cd /tmp/ironic-python-agent/imagebuild/coreos; git checkout stable/newton; make
+   - cd /tmp/ironic-python-agent/imagebuild/coreos; git checkout newton-eol; make
    - cp /tmp/ironic-python-agent/imagebuild/coreos/UPLOAD/coreos_production_pxe_image-oem.cpio.gz /httpboot/
    - cp /tmp/ironic-python-agent/imagebuild/coreos/UPLOAD/coreos_production_pxe.vmlinuz /httpboot/
    - chmod a+r /httpboot/coreos_production_pxe*
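Background on the checkout change: OpenStack deletes stable/<series> branches once a series reaches end of life and leaves a <series>-eol tag at the final commit, so the stable/newton ref no longer exists in ironic-python-agent and the image build has to target the tag:

   - cd /tmp/ironic-python-agent/imagebuild/coreos; git checkout newton-eol; make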
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml
index b3a2679..9962efc 100644
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml
+++ b/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml
@@ -102,7 +102,7 @@
 - description: Install elasticsearch client
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 2, delay: 30}
   skip_fail: false
 
 - description: Install kibana client
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml
index d2d4778..9f2ce17 100644
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml
+++ b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml
@@ -142,7 +142,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
         group_volumes:
          - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml
index 04185ea..19cc801 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml
@@ -20,7 +20,32 @@
   skip_fail: false
 
 - description: Sync all salt resources on master node
-  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.sync_all && sleep 5
+  cmd: sleep 60; salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.sync_all && sleep 5
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
+
+- description: MaaS auth
+  cmd: maas logout mirantis && maas login mirantis http://localhost:5240/MAAS/api/2.0/ 'FTvqwe7ybBp68gPar2:5mcctTAXVL8mns4ef4:zrA9LZwu2tMc8BAZpsPUfwWwTyQnAtDN'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Set upstream dns in MaaS
+  cmd: sleep 30; maas mirantis maas set-config name=upstream_dns value='10.10.0.15 8.8.8.8 8.8.4.4'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Setup NTP
+  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls ntp.server
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Wait until salt is up
+  cmd: sleep 60
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
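The MaaS CLI works through named profiles: maas login <profile> <api-url> <key> stores credentials, where the key has the form consumer-key:token-key:token-secret, and subsequent commands run as maas <profile> <endpoint> <action> — hence "maas mirantis maas set-config" for the upstream DNS. The sleeps are crude settle timers for the salt and MaaS APIs. Generic form of the auth step, with the key elided:

    - description: MaaS auth
      cmd: maas logout <profile> && maas login <profile> http://localhost:5240/MAAS/api/2.0/ '<consumer>:<token>:<secret>'
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false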
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml
index 6978bd3..838435c 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml
@@ -48,6 +48,7 @@
    # Node is ready, allow SSH access
    - echo "Allow SSH access ..."
    - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   - touch /is_cloud_init_finish
    ########################################################
 
   write_files:
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml
index 6978bd3..b850283 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml
@@ -19,10 +19,9 @@
 
   bootcmd:
    # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   - cloud-init-per once sudo echo 'sshd:ALL' >> /etc/hosts.deny
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
   output:
     all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
 
@@ -39,6 +38,9 @@
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
    # Run user data script from config drive
+   - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3
+   - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4
+   - ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
    - cd /root/config-drive && /bin/bash -xe ./user-data
 
    # Enable root access
@@ -47,7 +49,8 @@
    ########################################################
    # Node is ready, allow SSH access
    - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   - "sed -i -e '/sshd:ALL/d' /etc/hosts.deny"
+   - touch /is_cloud_init_finish
    ########################################################
 
   write_files:
@@ -60,3 +63,13 @@
             ServerAliveCountMax 10
             StrictHostKeyChecking no
             UserKnownHostsFile /dev/null
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
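The switch from an iptables DROP rule to /etc/hosts.deny keeps SSH closed during provisioning without touching the firewall: on systems where sshd honours TCP wrappers, appending sshd:ALL rejects all logins and deleting the line restores access, and touch /is_cloud_init_finish leaves a marker file the test harness can poll for readiness. The pattern in cloud-init terms:

    bootcmd:
     # block SSH logins until provisioning is done
     - cloud-init-per once sudo echo 'sshd:ALL' >> /etc/hosts.deny
    runcmd:
     # ... provisioning steps ...
     - "sed -i -e '/sshd:ALL/d' /etc/hosts.deny"   # re-open SSH
     - touch /is_cloud_init_finish                 # readiness marker for the harness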
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml
index aab7cde..0c365ac 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml
@@ -66,6 +66,7 @@
    #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
    - apt-get install linux-generic-hwe-16.04 -y
    - reboot
+   - touch /is_cloud_init_finish
    ########################################################
 
   write_files:
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
index b0568d3..db9c992 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
@@ -131,7 +131,7 @@
 
     address_pools:
       managment-pool01:
-        net: {{ os_env('MGMT_ADDRESS_POOL01', '10.11.0.0/24:24') }}
+        net: {{ os_env('MGMT_ADDRESS_POOL01', '10.11.0.0/16:16') }}
         params:
           ip_reserved:
             gateway: +1
@@ -173,7 +173,7 @@
             default_{{ HOSTNAME_MTR03 }}: {{ ETH1_IP_ADDRESS_MTR04 }}
 
       admin-pool01:
-        net: {{ os_env('DEPLOY_ADDRESS_POOL01', '10.10.0.0/24:24') }}
+        net: {{ os_env('DEPLOY_ADDRESS_POOL01', '10.10.0.0/16:16') }}
         params:
           ip_reserved:
             gateway: +1
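In devops address_pools, net: <range>/<pool-prefix>:<alloc-prefix> means "allocate a /<alloc-prefix> subnet out of <range>", so moving the deploy and management pools from 10.x.0.0/24:24 to 10.x.0.0/16:16 hands each pool the whole /16 — room for everything MaaS enlists on the baremetal lab rather than 254 hosts. Shape of the pool definition:

    admin-pool01:
      net: {{ os_env('DEPLOY_ADDRESS_POOL01', '10.10.0.0/16:16') }}
      params:
        ip_reserved:
          gateway: +1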
diff --git a/tcp_tests/templates/runtest.yml b/tcp_tests/templates/runtest.yml
index 573bd54..c1f1599 100644
--- a/tcp_tests/templates/runtest.yml
+++ b/tcp_tests/templates/runtest.yml
@@ -31,9 +31,6 @@
         max_microversion: 2.53
         min_compute_nodes: 2
         volume_device_name: 'vdc'
-      orchestration:
-        max_template_size: 5440000
-        max_resources_per_stack: 20000
       dns_feature_enabled:
         # Switch this to designate_admin_api_enabled once [1] is promoted to stable packages
         # [1] https://gerrit.mcp.mirantis.net/gitweb?p=salt-formulas/designate.git;a=commit;h=96a3f43f6cf1149559e54a00b5548bdf46333749
@@ -43,11 +40,6 @@
         api_v2_quotas: true
         api_v2_root_recordsets: true
         bug_1573141_fixed: true
-      volume-feature-enabled:
-        backup: false
-      volume:
-        storage_protocol: iSCSI
-        build_timeout: 300
       share:
         min_api_microversion: 2.0
         max_api_microversion: 2.40
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 0a50d04..898433f 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -21,7 +21,7 @@
 {% set UBUNTU_REPOSITORY = os_env('UBUNTU_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME} main restricted universe") %}
 {% set UBUNTU_UPDATES_REPOSITORY = os_env('UBUNTU_UPDATES_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-updates main restricted universe") %}
 {% set UBUNTU_SECURITY_REPOSITORY = os_env('UBUNTU_SECURITY_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-security main restricted universe") %}
-{% set UBUNTU_KEY_SERVER = os_env('REPOSITORY_SUITE', 'keyserver.ubuntu') %}
+{% set UBUNTU_KEY_SERVER = os_env('UBUNTU_KEY_SERVER', 'keyserver.ubuntu.com') %}
 {% set UBUNTU_KEY_ID = os_env('UBUNTU_KEY_ID', '0E08A149DE57BFBE') %}
 
 {# Address pools for reclass cluster model are taken in the following order:
@@ -1058,6 +1058,8 @@
 - description: Include class with tempest template into cfg node
   cmd: |
     sed -i 's/classes\:/classes\:\n- cluster.{{ CLUSTER_NAME }}.infra.runtest/g' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml;
+    git clone https://gerrit.mcp.mirantis.net/salt-formulas/runtest;
+    cd /root/runtest && git checkout 2468b1f1008ba516fda31e00e588de71447b6fa7 && make install;
     salt 'cfg01*' saltutil.refresh_pillar;
     salt 'cfg01*' saltutil.sync_all;
   node_name: {{ HOSTNAME_CFG01 }}
@@ -1092,16 +1094,6 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Temp WR for PROD-19442
-  cmd: |
-    apt-get install crudini -y;
-    crudini --verbose --set /root/test/tempest.conf validation connect_method floating;
-    crudini --verbose --set /root/test/tempest.conf validation run_validation  True;
-    crudini --verbose --set /root/test/tempest.conf validation image_ssh_user cirros;
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: true
-
 - description: Run tempest from new docker image
   cmd: |
     docker run -e ARGS="-r {{TEMPEST_PATTERN }} -w 4 {{ EXCLUDE_TEST_ARGS }}" -v /root/test/tempest.conf:/etc/tempest/tempest.conf -v /tmp/:/tmp/ -v /root/test:/root/tempest -v /etc/ssl/certs/:/etc/ssl/certs/ --rm docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest /bin/bash -c "run-tempest"
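The UBUNTU_KEY_SERVER fix above corrects two bugs in one line: the os_env call read the wrong variable (REPOSITORY_SUITE), so an UBUNTU_KEY_SERVER override never applied, and the fallback keyserver.ubuntu is a truncated hostname that cannot resolve. Corrected form:

    {% set UBUNTU_KEY_SERVER = os_env('UBUNTU_KEY_SERVER', 'keyserver.ubuntu.com') %}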
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml
index 49445ae..1fd9b17 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml
@@ -165,7 +165,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
         group_volumes:
          - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
index eff861b..ff0e77a 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
@@ -102,7 +102,7 @@
 - description: Install elasticsearch client
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 2, delay: 30}
   skip_fail: false
 
 - description: Install kibana client
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
index b9e03aa..de5427a 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
@@ -162,7 +162,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
index c7aecc5..75fffe4 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
@@ -162,7 +162,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
index f581357..9ec64be 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
@@ -101,7 +101,7 @@
 - description: Install elasticsearch client
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 2, delay: 30}
   skip_fail: false
 
 - description: Install kibana client
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
index 895ee4a..382dba4 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
@@ -152,7 +152,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml
index 483940e..08c87ef 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml
@@ -177,7 +177,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
index c43144a..17819f4 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
@@ -102,7 +102,6 @@
   retry: {count: 3, delay: 5}
   skip_fail: false
 
-
 - description: Install cinder
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'I@cinder:controller' state.sls cinder -b 1
@@ -290,79 +289,6 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 02
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 03
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 01
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 02
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 03
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: create volume_group
-  cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install cinder-volume
-  cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install crudini
-  cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
 - description: Install docker.io on gtw
   cmd: salt-call cmd.run 'apt-get install docker.io -y'
   node_name: {{ HOSTNAME_GTW01 }}
@@ -375,25 +301,6 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-- description: Restart cinder volume
-  cmd: |
-    salt -C 'I@cinder:controller' service.restart cinder-volume;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: create rc file on cfg
-  cmd: scp ctl01:/root/keystonercv3 /root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Copy rc file
-  cmd: scp /root/keystonercv3 gtw01:/root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
 - description: Install manila-api on first node
   cmd: |
     salt -C 'I@manila:api and *01*' state.sls manila.api;
@@ -440,7 +347,7 @@
   cmd: |
     salt 'ctl01*' cmd.run '. /root/keystonercv3; manila type-create default false --create_share_from_snapshot_support true --revert_to_snapshot_support true --mount_snapshot_support true --snapshot_support true --is_public true'
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 3, delay: 5}
   skip_fail: false
 
 - description: Create CIFS and NFS share and check it status
@@ -450,7 +357,7 @@
     sleep 5;
     salt 'ctl01*' cmd.run '. /root/keystonercv3; manila list';
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 3, delay: 5}
   skip_fail: false
 
 {{ SHARED.RUN_NEW_TEMPEST() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml
index 5db14e1..60aa2c8 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml
@@ -152,7 +152,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
index fab3ece..ed3bd67 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
@@ -218,23 +218,6 @@
   retry: {count: 10, delay: 30}
   skip_fail: false
 
-
-  # Upload cirros image
-
-- description: Upload cirros image on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Register image in glance
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
 - description: Create net04_external
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
     '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
@@ -284,20 +267,6 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-#- description:  Allow all tcp
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-#    '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 30}
-#  skip_fail: false
-#
-#- description:  Allow all icmp
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-#    '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 30}
-#  skip_fail: false
-
 - description: sync time
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
     'service ntp stop; ntpd -gq;  service ntp start'
@@ -305,79 +274,6 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 02
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 03
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 01
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 02
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 03
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: create volume_group
-  cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install cinder-volume
-  cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install crudini
-  cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
 - description: Install docker.io on gtw
   cmd: salt-call cmd.run 'apt-get install docker.io -y'
   node_name: {{ HOSTNAME_GTW01 }}
@@ -390,25 +286,6 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-- description: Restart cinder volume
-  cmd: |
-    salt -C 'I@cinder:controller' service.restart cinder-volume;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: create rc file on cfg
-  cmd: scp ctl01:/root/keystonercv3 /root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Copy rc file
-  cmd: scp /root/keystonercv3 gtw01:/root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
 - description: Install manila-api on first node
   cmd: |
     salt -C 'I@manila:api and *01*' state.sls manila.api;
@@ -455,7 +332,7 @@
   cmd: |
     salt 'ctl01*' cmd.run '. /root/keystonercv3; manila type-create default false --create_share_from_snapshot_support true --revert_to_snapshot_support true --mount_snapshot_support true --snapshot_support true --is_public true'
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 3, delay: 5}
   skip_fail: false
 
 - description: Create CIFS and NFS share and check it status
@@ -465,7 +342,7 @@
     sleep 5;
     salt 'ctl01*' cmd.run '. /root/keystonercv3; manila list';
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 3, delay: 5}
   skip_fail: false
 
 {{ SHARED.RUN_NEW_TEMPEST() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml
index 380ac59..aaf67ba 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml
@@ -152,7 +152,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
index adb7336..cd2ab78 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
@@ -211,23 +211,6 @@
   retry: {count: 10, delay: 30}
   skip_fail: false
 
-
-  # Upload cirros image
-
-- description: Upload cirros image on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Register image in glance
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
 - description: Create net04_external
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
     '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
@@ -251,7 +234,7 @@
 
 - description: Create subnet_net04
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.180'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
   skip_fail: false
@@ -277,20 +260,6 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-#- description:  Allow all tcp
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-#    '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 30}
-#  skip_fail: false
-#
-#- description:  Allow all icmp
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-#    '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 30}
-#  skip_fail: false
-
 - description: sync time
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
     'service ntp stop; ntpd -gq;  service ntp start'
@@ -298,79 +267,6 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 02
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 03
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 01
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 02
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 03
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: create volume_group
-  cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install cinder-volume
-  cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install crudini
-  cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
 - description: Install docker.io on gtw
   cmd: salt-call cmd.run 'apt-get install docker.io -y'
   node_name: {{ HOSTNAME_GTW01 }}
@@ -383,25 +279,6 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-- description: Restart cinder volume
-  cmd: |
-    salt -C 'I@cinder:controller' service.restart cinder-volume;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: create rc file on cfg
-  cmd: scp ctl01:/root/keystonercv3 /root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Copy rc file
-  cmd: scp /root/keystonercv3 gtw01:/root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
 - description: Install manila-api on first node
   cmd: |
     salt -C 'I@manila:api and *01*' state.sls manila.api;
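Shrinking the net04__subnet allocation pool from .120–.240 to .120–.180 cuts the DHCP-assignable range from 121 to 61 addresses, leaving .181–.254 unallocated — presumably so addresses above .180 stay free for static assignment in the tenant network during test runs. The resulting command:

    neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.180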
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
index 056176c..6d5bcb3 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
@@ -14,7 +14,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "runtest"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "runtest" "neutron" ') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay.yaml
index 8d6a20d..e1914b4 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay.yaml
@@ -152,7 +152,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay.yaml
index 44559f9..1ce697b 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay.yaml
@@ -162,7 +162,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml
index 2ea28a8..578c2ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml
@@ -158,7 +158,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
index 65013e7..3f15080 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
@@ -168,13 +168,6 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-#- description: Install gnocchi statsd (optional)
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:statsd and *01*' state.sls gnocchi.statsd &&
-#       salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:statsd' state.sls gnocchi.statsd
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 5}
-#  skip_fail: false
-
 - description: Install panko server
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@panko:server and *01*' state.sls panko &&
        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@panko:server' state.sls panko
@@ -216,7 +209,6 @@
   retry: {count: 1, delay: 5}
   skip_fail: true
 
-
 # Install compute node
 
 - description: Apply formulas for compute node
@@ -261,7 +253,7 @@
 
 - description: Create subnet_net04
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.180'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
   skip_fail: false
@@ -287,20 +279,6 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-#- description:  Allow all tcp
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-#    '. /root/keystonercv3; openstack security group rule create --proto tcp --dst-port 22 default'
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 30}
-#  skip_fail: false
-#
-#- description:  Allow all icmp
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-#    '. /root/keystonercv3; openstack security group rule create --proto icmp default'
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 30}
-#  skip_fail: false
-
 - description: sync time
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
     'service ntp stop; ntpd -gq;  service ntp start'
@@ -308,86 +286,6 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-# Configure cinder-volume salt-call
-- description: Set disks 01
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 02
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 03
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 01
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 02
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 03
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: create volume_group
-  cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install cinder-volume
-  cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install crudini
-  cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Restart cinder volume
-  cmd: |
-    salt -C 'I@cinder:controller' service.restart cinder-volume;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
 - description: Install docker.io on gtw
   cmd: salt-call cmd.run 'apt-get install docker.io -y'
   node_name: {{ HOSTNAME_GTW01 }}
@@ -400,18 +298,6 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-- description: create rc file on cfg
-  cmd: scp ctl01:/root/keystonercv3 /root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Copy rc file
-  cmd: scp /root/keystonercv3 gtw01:/root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
 - description: Install manila-api on first node
   cmd: |
     salt -C 'I@manila:api and *01*' state.sls manila.api;
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
index 730e05f..d1d05d9 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
@@ -14,7 +14,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja" "runtest"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja" "runtest" "neutron" ') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay.yaml
index 8c45b68..816f9b1 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay.yaml
@@ -157,7 +157,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml b/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml
index e5c1ab2..dbbc9bc 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml
@@ -101,7 +101,7 @@
 - description: Install elasticsearch client
   cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 2, delay: 30}
   skip_fail: false
 
 - description: Install kibana client
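
The elasticsearch client step above (and the matching steps in the k8s templates below) now allows a second attempt with a longer pause. A minimal sketch of the retry semantics these templates assume — reading `count` as the total number of attempts and `delay` as the pause in seconds between them; the actual runner in tcp_tests may differ:

    import time

    def run_with_retry(step, count=2, delay=30):
        """Run `step` up to `count` times, sleeping `delay` seconds between tries."""
        for attempt in range(1, count + 1):
            if step():
                return True
            if attempt < count:
                time.sleep(delay)
        return False
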
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml
index d4e064f..0782243 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml
@@ -152,7 +152,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/underlay.yaml b/tcp_tests/templates/virtual-mcp11-dvr/underlay.yaml
index 7a37142..51f1b64 100644
--- a/tcp_tests/templates/virtual-mcp11-dvr/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp11-dvr/underlay.yaml
@@ -137,7 +137,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml
index 831cded..0ee58bc 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml
@@ -87,7 +87,7 @@
 - description: Install elasticsearch client
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 2, delay: 30}
   skip_fail: false
 
 - description: Install kibana client
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml
index a2cb88e..07f156b 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml
@@ -110,7 +110,7 @@
 - description: Install elasticsearch client
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 2, delay: 30}
   skip_fail: false
 
 - description: Install kibana client
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml
index 54f48f4..dabe708 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml
@@ -143,7 +143,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
         group_volumes:
          - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay.yaml
index e473292..569a96c 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/underlay.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/underlay.yaml
@@ -140,7 +140,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
         group_volumes:
          - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
diff --git a/tcp_tests/templates/virtual-offline-ssl/underlay.yaml b/tcp_tests/templates/virtual-offline-ssl/underlay.yaml
index 47b1c57..18c2bf1 100644
--- a/tcp_tests/templates/virtual-offline-ssl/underlay.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/underlay.yaml
@@ -160,7 +160,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay.yaml
index a5673e2..25cfbc4 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay.yaml
@@ -178,7 +178,7 @@
             address_pool: external-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
 
 
         group_volumes:
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index 59e348b..53dc1c1 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -260,3 +260,18 @@
             )
             openstack_actions.download_tempest_report()
         LOG.info("*************** DONE **************")
+
+    @pytest.mark.fail_snapshot
+    def test_bm_deploy(self, config, openstack_deployed,
+                       openstack_actions):
+        """Test for deploying an mcp environment on baremetal
+
+        """
+        openstack_actions._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+
+        if settings.RUN_TEMPEST:
+            openstack_actions.run_tempest(pattern=settings.PATTERN)
+            openstack_actions.download_tempest_report()
+        LOG.info("*************** DONE **************")
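
The new test_bm_deploy case force-syncs NTP on all nodes, then runs tempest only when settings.RUN_TEMPEST is set. A hedged sketch of that gating, assuming (as is the convention elsewhere in tcp_tests) that settings are populated from environment variables; the names are from this diff, the defaults are illustrative:

    import os

    # Illustrative only: RUN_TEMPEST and PATTERN are read from the environment.
    RUN_TEMPEST = os.environ.get('RUN_TEMPEST', 'false').lower() == 'true'
    PATTERN = os.environ.get('PATTERN', 'smoke')

    if RUN_TEMPEST:
        print('would run tempest with pattern: {0}'.format(PATTERN))
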
diff --git a/tcp_tests/tests/system/test_offline.py b/tcp_tests/tests/system/test_offline.py
index e94188a..51757fd 100644
--- a/tcp_tests/tests/system/test_offline.py
+++ b/tcp_tests/tests/system/test_offline.py
@@ -12,9 +12,15 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 # import pytest
+import time
+
+from collections import Counter
 
 from tcp_tests import logger
 from tcp_tests.managers.jenkins.client import JenkinsClient
+from tcp_tests import settings
+
+from tcp_tests import managers
 
 LOG = logger.logger
 
@@ -22,119 +28,222 @@
 class TestOfflineDeployment(object):
     """docstring for TestOfflineDeployment"""
 
-    def test_deploy_day1(self, show_step, underlay, common_services_deployed,
-                         salt_deployed):
+    def test_deploy_day1(self, show_step, config, underlay, hardware,
+                         common_services_deployed, salt_deployed):
         """Test for deploying an mcp from day01 images
 
         Scenario:
-            1. Approve local ssh key to jenkins
-            2. Boot CFG and APT virtual machines
-            3. Setup jq
-            4. Wait salt master
-            5. Addition config of MaaS
-            6. Wait dhcpd server
-            7. Start comissioning node via MaaS
-            8. Wait of comissioning node by MaaS
-            9. Start deploing node via MaaS
-            10. Wait of deploing node by
-            11. Accept all keys
-            12. Run deploy OS job
+            1. Wait for the salt master
+            2. Additional configuration of MaaS
+            3. Wait for the dhcpd server
+            4. Start commissioning nodes via MaaS
+            5. Wait for MaaS to commission the nodes
+            6. Start deploying nodes via MaaS
+            7. Wait for MaaS to deploy the nodes
+            8. Accept all keys
+            9. Configure and reboot baremetal nodes after MaaS deployment
+            10. Run the deploy OS job
 
         """
         # group = hardware._get_default_node_group()
         nodes = underlay.node_names()
         LOG.info("Nodes - {}".format(nodes))
         cfg_node = 'cfg01.offline-ocata-vxlan.local'
+        tempest_node = 'gtw01.offline-ocata-vxlan.local'
         verbose = True
+        ssh_test_key = config.underlay.ssh_keys[0]['public']
 
-        # show_step(1)
-        # cmd = ("mkdir -p /var/lib/jenkins/.ssh && "
-        #        "ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts && "
-        #        "chown jenkins /var/lib/jenkins/.ssh/known_hosts")
+        show_step(1)
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd="""timeout 300s /bin/bash -c """
+                """'while ! salt-call test.ping; do """
+                """echo "salt master still isnt running"; sleep 10; done'"""
+        )  # noqa
+
+        show_step(2)
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd='salt-call saltutil.sync_all')
+
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd="salt '*' ssh.set_auth_key root '{}'".format(ssh_test_key))
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd='salt "*" ssh.set_auth_key root '
+                '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd="salt '*' ssh.set_auth_key ubuntu '{}'".format(ssh_test_key))
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd='salt "*" ssh.set_auth_key ubuntu '
+                '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd='salt-call state.sls maas.region')
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd='maas logout mirantis && '
+            'maas login mirantis '
+            'http://localhost:5240/MAAS/api/2.0/ '
+            'FTvqwe7ybBp68gPar2:5mcctTAXVL8mns4ef4:zrA9LZwu2tMc8BAZpsPUfwWwTyQnAtDN'  # noqa
+        )
+
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd="maas mirantis maas set-config "
+                "name=upstream_dns value='10.10.0.15 8.8.8.8 8.8.4.4'")
+
         # underlay.check_call(
-        #     node_name=cfg_node, verbose=verbose,
-        #     cmd=cmd)
+        #     node_name=cfg_node,
+        #     verbose=verbose,
+        #     cmd="maas mirantis ipranges create "
+        #         "type=dynamic start_ip=10.10.191.255 end_ip=10.10.255.254 "
+        #         "subnet=$(maas mirantis subnets read | jq '.[] | "
+        #         "select(.name==\"10.10.0.0/16\") | .id')")
 
-        # show_step(2)
-        # underlay.check_call(node_name=cfg_node, verbose=verbose,
-        #                     cmd='salt-key')
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd="maas mirantis vlan update "
+                "$(maas mirantis subnets read | jq '.[] | "
+                "select(.name==\"10.10.0.0/16\") | .vlan.fabric_id') "
+                "0 dhcp_on=True primary_rack='cfg01'")
 
-        # show_step(3)
-        # underlay.check_call(node_name=cfg_node, verbose=verbose,
-        #                     cmd='apt install -y jq')
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd="ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub")
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd='maas mirantis sshkeys create '
+                'key="$(cat ~root/.ssh/id_rsa.pub)"')
+
+        show_step(3)
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd="""timeout 90s /bin/bash -c 'while ! pidof dhcpd; do """
+                """echo "dhcpd still isnt running"; sleep 10; done'""")
 
         show_step(4)
         underlay.check_call(
             node_name=cfg_node,
             verbose=verbose,
-            cmd="""timeout 300s /bin/bash -c 'while ! salt-call test.ping; do echo "salt master still isnt running"; sleep 10; done'""")  # noqa
-
+            cmd='salt-call state.sls maas.machines')
         show_step(5)
-        underlay.check_call(node_name=cfg_node, verbose=verbose,
-                            cmd='salt-call saltutil.sync_all')
-        underlay.check_call(node_name=cfg_node, verbose=verbose,
-                            cmd='salt-call state.sls maas.region')
-        underlay.check_call(
-            node_name=cfg_node, verbose=verbose,
-            cmd='maas logout mirantis && '
-            'maas login mirantis '
-            'http://localhost/MAAS/api/2.0/ '
-            'FTvqwe7ybBp68gPar2:5mcctTAXVL8mns4ef4:zrA9LZwu2tMc8BAZpsPUfwWwTyQnAtDN') # noqa
-
-        underlay.check_call(
-            node_name=cfg_node, verbose=verbose,
-            cmd="maas mirantis ipranges create "
-            "type=dynamic start_ip=10.10.191.255 end_ip=10.10.255.254 "
-            "subnet=$(maas mirantis subnets read | jq '.[] | select(.name==\"10.10.0.0/16\") | .id')") # noqa
-        underlay.check_call(node_name=cfg_node, verbose=verbose,
-            cmd="maas mirantis vlan update "
-            "$(maas mirantis subnets read | jq '.[] | select(.name==\"10.10.0.0/16\") | .vlan.fabric_id') " # noqa
-            "0 dhcp_on=True primary_rack='cfg01'")
-
-        underlay.check_call(
-            node_name=cfg_node, verbose=verbose,
-            cmd="ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub")
-        underlay.check_call(
-            node_name=cfg_node, verbose=verbose,
-            cmd='maas mirantis sshkeys create '
-                'key="$(cat ~root/.ssh/id_rsa.pub)"')
-
-        show_step(6)
-        underlay.check_call(node_name=cfg_node, verbose=verbose,
-            cmd="""timeout 90s /bin/bash -c 'while ! pidof dhcpd; do  echo "dhcpd still isnt running"; sleep 10; done'""") # noqa
-
-        show_step(7)
-        underlay.check_call(node_name=cfg_node, verbose=verbose,
-                            cmd='salt-call state.sls maas.machines')
-        show_step(8)
-        cmd = """   timeout 600s bash -c 'hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); while ! [ $(echo "$hosts" | wc -w) -eq 10 ]; do echo "Ready hosts:\n$hosts"; sleep 30; hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); done '   """ # noqa
+        cmd = """   timeout 600s bash -c 'hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); while ! [ $(echo "$hosts" | wc -w) -eq 10 ]; do echo "Ready hosts:\n$hosts"; sleep 30; hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); done '   """  # noqa
         underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
-        underlay.check_call(node_name=cfg_node, verbose=verbose,
-                            cmd='salt-key')
-        show_step(9)
         underlay.check_call(
-            node_name=cfg_node, verbose=verbose,
+            node_name=cfg_node, verbose=verbose, cmd='salt-key')
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd='salt-call state.sls maas.machines.assign_ip')
+        show_step(6)
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
             cmd='salt-call state.sls maas.machines.deploy')
-        show_step(10)
+        show_step(7)
         underlay.check_call(
-            node_name=cfg_node, verbose=verbose,
+            node_name=cfg_node,
+            verbose=verbose,
             cmd='salt-call state.sls maas.machines.wait_for_deployed')
-        underlay.check_call(node_name=cfg_node, verbose=verbose,
-                            cmd='salt-key')
-
-        show_step(11)
         underlay.check_call(
-            node_name=cfg_node, verbose=verbose, expected=[0, 1],
+            node_name=cfg_node, verbose=verbose, cmd='salt-key')
+
+        show_step(8)
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            expected=[0, 1],
             cmd='salt-key -A -y --include-denied --include-rejected')
         underlay.check_call(
-            node_name=cfg_node, verbose=verbose,
-            cmd='salt-key')
+            node_name=cfg_node, verbose=verbose, cmd='salt-key')
+
+        show_step(9)
+        cmd = "salt '*' saltutil.refresh_pillar"
+        underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+        cmd = "salt '*' saltutil.sync_all"
+        underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+        underlay.check_call(
+            node_name=cfg_node, verbose=verbose, cmd="reclass-salt --top")
+
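+        # /run is a tmpfs, so the marker file created below disappears when a
+        # node reboots; finding it afterwards means the node never restarted.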
+        cmd = "salt -C " \
+              "'I@salt:control or I@nova:compute or I@neutron:gateway' " \
+              "cmd.run 'touch /run/is_rebooted'"
+        underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+        cmd = "salt --async -C " \
+              "'I@salt:control' cmd.run 'salt-call state.sls " \
+              "linux.system.user,openssh,linux.network;reboot'"
+        underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+        cmd = "salt --async -C " \
+              "'I@nova:compute' cmd.run 'salt-call state.sls " \
+              "linux.system.user,openssh,linux.network;reboot'"
+        underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+        cmd = "salt --async -C " \
+              "'I@neutron:gateway' cmd.run 'salt-call state.sls " \
+              "linux.system.user,openssh,linux.network;reboot'"
+        underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+        time.sleep(360)  # TODO: Add ssh waiter
+
+        cmd = "salt -C " \
+              "'I@salt:control or I@nova:compute or I@neutron:gateway'" \
+              " test.ping"
+        underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+        cmd = """salt -C """ \
+              """'I@salt:control or I@nova:compute or I@neutron:gateway' """ \
+              """cmd.run '[ -f "/run/is_rebooted" ] && """ \
+              """echo "Has not been rebooted!" || echo "Rebooted"' """
+        ret = underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+        count = Counter(ret['stdout_str'].split())
+
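+        # A successfully rebooted node prints the single token "Rebooted", so
+        # counting whitespace-separated tokens in the aggregated stdout
+        # tallies how many of the targeted nodes actually restarted.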
+        assert count['Rebooted'] == 10, \
+            "All 10 baremetal nodes should have been rebooted"
+
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd="salt '*' ssh.set_auth_key root '{}'".format(ssh_test_key))
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd='salt "*" ssh.set_auth_key root '
+                '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd="salt '*' ssh.set_auth_key ubuntu '{}'".format(ssh_test_key))
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd='salt "*" ssh.set_auth_key ubuntu '
+                '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
 
         salt_api = \
             salt_deployed.get_pillar(cfg_node, '_param:jenkins_salt_api_url')
         salt_api = salt_api[0].get(cfg_node)
 
-        show_step(12)
+        show_step(10)
         jenkins = JenkinsClient(
             host='http://172.16.44.33:8081',
             username='admin',
@@ -144,11 +253,56 @@
         build = jenkins.run_build('deploy_openstack', params)
 
         jenkins.wait_end_of_build(
-            name=build[0],
-            build_id=build[1],
-            timeout=60 * 60 * 2)
+            name=build[0], build_id=build[1], timeout=60 * 60 * 2)
+
+        with open("{path}/cfg01_jenkins_deploy_openstack_console.log".format(
+                path=settings.LOGS_DIR), 'w') as f:
+            LOG.info("Save jenkins console log")
+            console_log = \
+                jenkins.get_build_output('deploy_openstack', build[1])
+            f.write(console_log)
 
         assert \
             jenkins.build_info(
                 name=build[0], build_id=build[1])['result'] == 'SUCCESS', \
             "Deploy openstack job has failed"
+
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd="salt '*' ssh.set_auth_key root '{}'".format(ssh_test_key))
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd='salt "*" ssh.set_auth_key root '
+                '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd="salt '*' ssh.set_auth_key ubuntu '{}'".format(ssh_test_key))
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd='salt "*" ssh.set_auth_key ubuntu '
+                '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+
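+        # Register the newly deployed nodes in the underlay SSH config,
+        # skipping entries whose node_name is already present.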
+        salt_nodes = salt_deployed.get_ssh_data()
+        nodes_list = \
+            [node for node in salt_nodes
+             if not any(node['node_name'] == n['node_name']
+                        for n in config.underlay.ssh)]
+        config.underlay.ssh = config.underlay.ssh + nodes_list
+        underlay.add_config_ssh(nodes_list)
+
+        time.sleep(120)  # debug sleep
+        cmd = "salt '*' test.ping"
+        underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+        openstack = managers.openstack_manager.OpenstackManager(
+            config, underlay, hardware, salt_deployed)
+
+        if settings.RUN_TEMPEST:
+            openstack.run_tempest(
+                pattern=settings.PATTERN,
+                node_name=tempest_node)
+            openstack.download_tempest_report()
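
The same four ssh.set_auth_key calls appear three times in this test: before MaaS provisioning, after the baremetal reboots, and after the Jenkins deploy job. A hypothetical helper — not part of this change — that would collapse each repetition into a single call:

    def authorize_keys(underlay, cfg_node, ssh_test_key,
                       users=('root', 'ubuntu')):
        """Install the test key and the master's own key for each user."""
        for user in users:
            underlay.check_call(
                node_name=cfg_node, verbose=True,
                cmd="salt '*' ssh.set_auth_key {0} '{1}'".format(
                    user, ssh_test_key))
            underlay.check_call(
                node_name=cfg_node, verbose=True,
                cmd='salt "*" ssh.set_auth_key {0} '
                    '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"'
                    .format(user))

Each four-call block in the test body would then reduce to authorize_keys(underlay, cfg_node, ssh_test_key).
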