Merge "Update queens models and context"
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
index 6b9370e..f4c8765 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -50,12 +50,6 @@
                 """)
             }
 
-            if (env.TCP_QA_REFS) {
-                stage("Update working dir to patch ${TCP_QA_REFS}") {
-                    shared.update_working_dir()
-                }
-            }
-
             stage("Create an environment ${ENV_NAME} in disabled state") {
                 // deploy_hardware.xml
                 shared.run_cmd("""\
@@ -103,7 +97,7 @@
             }
 
           } catch (e) {
-              common.printMsg("Job is failed", "red")
+              common.printMsg("Job failed: " + e.message, "red")
               throw e
           } finally {
             // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
index 538f5ea..d067e07 100644
--- a/jobs/pipelines/swarm-deploy-cicd.groovy
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -35,12 +35,6 @@
                 error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
             }
 
-            if (env.TCP_QA_REFS) {
-                stage("Update working dir to patch ${TCP_QA_REFS}") {
-                    shared.update_working_dir()
-                }
-            }
-
             // Install core and cicd
             def stack
             def timeout
@@ -66,7 +60,7 @@
             }
 
         } catch (e) {
-            common.printMsg("Job is failed", "red")
+            common.printMsg("Job failed: " + e.message, "red")
             throw e
         } finally {
             // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-deploy-platform.groovy b/jobs/pipelines/swarm-deploy-platform.groovy
index 78e363f..54bc43d 100644
--- a/jobs/pipelines/swarm-deploy-platform.groovy
+++ b/jobs/pipelines/swarm-deploy-platform.groovy
@@ -35,12 +35,6 @@
                 error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
             }
 
-            if (env.TCP_QA_REFS) {
-                stage("Update working dir to patch ${TCP_QA_REFS}") {
-                    shared.update_working_dir()
-                }
-            }
-
             // Install the cluster
             def stack
             def timeout
@@ -66,7 +60,7 @@
             }
 
         } catch (e) {
-            common.printMsg("Job is failed", "red")
+            common.printMsg("Job failed: " + e.message, "red")
             throw e
         } finally {
             // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index 5d7bd8d..553b8a2 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -36,16 +36,9 @@
     dir("${PARENT_WORKSPACE}") {
         try {
 
-            if (env.TCP_QA_REFS) {
-                stage("Update working dir to patch ${TCP_QA_REFS}") {
-                    shared.update_working_dir()
-                }
-            }
-
             stage("Run tests") {
                 def steps = shared.get_steps_list(PASSED_STEPS)
                 def sources = """\
-                    cd ${PARENT_WORKSPACE}
                     export ENV_NAME=${ENV_NAME}
                     . ./tcp_tests/utils/env_salt"""
                 if (steps.contains('k8s')) {
@@ -59,7 +52,7 @@
                 def installed = steps.collect {"""\
                     export ${it}_installed=true"""}.join("\n")
 
-                shared.run_sh(sources + installed + """
+                shared.run_cmd(sources + installed + """
                     export TESTS_CONFIGS=${ENV_NAME}_salt_deployed.ini
                     export MANAGER=devops  # use 'hardware' fixture to manage fuel-devops environment
                     export salt_master_host=\$SALT_MASTER_IP  # skip salt_deployed fixture
@@ -75,7 +68,7 @@
             }
 
         } catch (e) {
-            common.printMsg("Job is failed", "red")
+            common.printMsg("Job failed: " + e.message, "red")
             throw e
         } finally {
             // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-testrail-report.groovy b/jobs/pipelines/swarm-testrail-report.groovy
index e7ffe92..333547a 100644
--- a/jobs/pipelines/swarm-testrail-report.groovy
+++ b/jobs/pipelines/swarm-testrail-report.groovy
@@ -31,13 +31,6 @@
     }
     dir("${PARENT_WORKSPACE}") {
         try {
-
-            if (env.TCP_QA_REFS) {
-                stage("Update working dir to patch ${TCP_QA_REFS}") {
-                    shared.update_working_dir()
-                }
-            }
-
             def report_name = ''
             def testSuiteName = ''
             def methodname = ''
@@ -118,7 +111,7 @@
             }
 
         } catch (e) {
-            common.printMsg("Job is failed", "red")
+            common.printMsg("Job failed: " + e.message, "red")
             throw e
         } finally {
             // reporting is failed for some reason
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index 108ee0c..e4779cd 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -2,19 +2,6 @@
 
 import groovy.xml.XmlUtil
 
-def run_sh(String cmd) {
-    // run shell script without catching any output
-    def common = new com.mirantis.mk.Common()
-    common.printMsg("Run shell command:\n" + cmd, "blue")
-    def VENV_PATH='/home/jenkins/fuel-devops30'
-    script = """\
-        set -ex;
-        . ${VENV_PATH}/bin/activate;
-        bash -c '${cmd.stripIndent()}'
-    """
-    return sh(script: script)
-}
-
 def run_cmd(String cmd, Boolean returnStdout=false) {
     def common = new com.mirantis.mk.Common()
     common.printMsg("Run shell command:\n" + cmd, "blue")
@@ -128,17 +115,6 @@
         """)
 }
 
-def update_working_dir() {
-        // Use to fetch a patchset from gerrit to the working dir
-        run_cmd("""\
-            if [ -n "$TCP_QA_REFS" ]; then
-                set -e
-                git fetch https://review.gerrithub.io/Mirantis/tcp-qa $TCP_QA_REFS && git checkout FETCH_HEAD || exit \$?
-            fi
-            pip install -r tcp_tests/requirements.txt
-        """)
-}
-
 def swarm_bootstrap_salt_cluster_devops() {
         def common = new com.mirantis.mk.Common()
         def cookiecutter_template_commit = env.COOKIECUTTER_TEMPLATE_COMMIT ?: env.MCP_VERSION
diff --git a/tcp_tests/fixtures/runtest_fixtures.py b/tcp_tests/fixtures/runtest_fixtures.py
index 3e1a45b..5a43b42 100644
--- a/tcp_tests/fixtures/runtest_fixtures.py
+++ b/tcp_tests/fixtures/runtest_fixtures.py
@@ -18,7 +18,7 @@
 
 
 @pytest.fixture(scope='function')
-def tempest_actions(config, underlay_actions, salt_actions):
+def tempest_actions(underlay_actions, salt_actions):
     """
     Run tempest tests
     """
@@ -28,7 +28,6 @@
     domain_name = settings.DOMAIN_NAME
     target = settings.TEMPEST_TARGET
     runtest = RuntestManager(
-        config,
         underlay_actions, salt_actions,
         cluster_name=cluster_name,
         domain_name=domain_name,
diff --git a/tcp_tests/helpers/ext.py b/tcp_tests/helpers/ext.py
index 7abc53e..19bdf08 100644
--- a/tcp_tests/helpers/ext.py
+++ b/tcp_tests/helpers/ext.py
@@ -34,11 +34,8 @@
 
 
 NETWORK_TYPE = enum(
-    'admin',
-    'control',
-    'tenant',
-    'storage',
-    'external',
+    'private',
+    'admin'
 )
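
For reference, the enum() helper called here behaves like a namedtuple whose field names equal their string values, so NETWORK_TYPE.private == 'private'. A minimal sketch of that behavior (not the exact tcp-qa implementation):

    import collections

    def enum(*values):
        # One attribute per value; attribute name and value are identical.
        return collections.namedtuple('Enum', values)(*values)

    NETWORK_TYPE = enum('private', 'admin')
    assert NETWORK_TYPE.admin == 'admin'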
 
 
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
index bd4024c..7424a49 100644
--- a/tcp_tests/managers/envmanager_devops.py
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -71,7 +71,6 @@
             self._create_environment()
         self.set_dns_config()
         self.set_address_pools_config()
-        self.set_dhcp_ranges_config()
 
     @property
     def _devops_config(self):
@@ -523,14 +522,3 @@
         """Store address pools CIDRs in config object"""
         for ap in self.__env.get_address_pools():
             self.__config.underlay.address_pools[ap.name] = ap.net
-
-    def set_dhcp_ranges_config(self):
-        """Store DHCP ranges in config object"""
-        for ap in self.__env.get_address_pools():
-            if "gateway" in ap.ip_reserved and "dhcp" in ap.ip_ranges:
-                self.__config.underlay.dhcp_ranges[ap.name] = {
-                    "cidr": ap.net,
-                    "start": ap.ip_range_start("dhcp"),
-                    "end": ap.ip_range_end("dhcp"),
-                    "gateway": ap.gateway,
-                }
diff --git a/tcp_tests/managers/execute_commands.py b/tcp_tests/managers/execute_commands.py
index 193153c..adb76dc 100644
--- a/tcp_tests/managers/execute_commands.py
+++ b/tcp_tests/managers/execute_commands.py
@@ -87,7 +87,6 @@
         retry_count = retry.get('count', 1)
         retry_delay = retry.get('delay', 1)
         skip_fail = step.get('skip_fail', False)
-        timeout = step.get('timeout', None)
 
         with self.__underlay.remote(node_name=node_name) as remote:
 
@@ -102,7 +101,7 @@
                 LOG.info("\n\n{0}\n{1}".format(
                     msg + retry_msg, '=' * len(msg + retry_msg)))
 
-                result = remote.execute(cmd, timeout=timeout, verbose=True)
+                result = remote.execute(cmd, verbose=True)
                 if return_res:
                     return result
 
@@ -149,7 +148,6 @@
         retry_count = retry.get('count', 1)
         retry_delay = retry.get('delay', 1)
         skip_fail = step.get('skip_fail', False)
-        timeout = step.get('timeout', None)
 
         if not bool(state) ^ bool(states):
             raise ValueError("You should use state or states in step")
@@ -167,7 +165,7 @@
 
             method = getattr(self._salt, self._salt._map[do])
             command_ret = method(tgt=target, state=state or states,
-                                 args=args, kwargs=kwargs, timeout=timeout)
+                                 args=args, kwargs=kwargs)
             command_ret = command_ret if \
                 isinstance(command_ret, list) else [command_ret]
             results = [(r['return'][0], f) for r, f in command_ret]
diff --git a/tcp_tests/managers/rallymanager.py b/tcp_tests/managers/rallymanager.py
index 589e1ee..5b7fd4c 100644
--- a/tcp_tests/managers/rallymanager.py
+++ b/tcp_tests/managers/rallymanager.py
@@ -97,7 +97,7 @@
                    " docker pull {image}:{version}".format(image=image,
                                                            version=version))
             self._underlay.check_call(cmd, node_name=self._node_name)
-        except Exception:
-            LOG.debug('Cannot install docker-ce')
+        except Exception as e:
+            LOG.debug('Cannot install docker-ce: {}'.format(e))
             cmd = ("apt-get -y install docker.io &&"
                    " docker pull {image}:{version}".format(image=image,
diff --git a/tcp_tests/managers/runtestmanager.py b/tcp_tests/managers/runtestmanager.py
index 036f415..c400556 100644
--- a/tcp_tests/managers/runtestmanager.py
+++ b/tcp_tests/managers/runtestmanager.py
@@ -14,6 +14,9 @@
 
 import json
 import os
+import time
+
+from devops.helpers import helpers
 
 from tcp_tests import logger
 from tcp_tests import settings
@@ -22,6 +25,71 @@
 
 TEMPEST_CFG_DIR = '/tmp/test'
 
+CONFIG = {
+    'classes': ['service.runtest.tempest',
+                'service.runtest.tempest.services.manila.glance'],
+    'parameters': {
+        '_param': {
+            'runtest_tempest_cfg_dir': TEMPEST_CFG_DIR,
+            'runtest_tempest_cfg_name': 'tempest.conf',
+            'runtest_tempest_public_net': 'net04_ext',
+            'tempest_test_target': 'gtw01*'
+        },
+        'neutron': {
+            'client': {
+                'enabled': True
+            }
+        },
+        'runtest': {
+            'enabled': True,
+            'keystonerc_node': 'ctl01*',
+            'tempest': {
+                'enabled': True,
+                'cfg_dir': '${_param:runtest_tempest_cfg_dir}',
+                'cfg_name': '${_param:runtest_tempest_cfg_name}',
+                'DEFAULT': {
+                    'log_file': 'tempest.log'
+                },
+                'compute': {
+                    'build_timeout': 600,
+                    'max_microversion': 2.53,
+                    'min_compute_nodes': 2,
+                    'min_microversion': 2.1,
+                    'volume_device_name': 'vdc'
+                },
+                'convert_to_uuid': {
+                    'network': {
+                        'public_network_id':
+                        '${_param:runtest_tempest_public_net}'
+                    }
+                },
+                'dns_feature_enabled': {
+                    'api_admin': False,
+                    'api_v1': False,
+                    'api_v2': True,
+                    'api_v2_quotas': True,
+                    'api_v2_root_recordsets': True,
+                    'bug_1573141_fixed': True
+                },
+                'heat_plugin': {
+                    'floating_network_name':
+                    '${_param:runtest_tempest_public_net}'
+                },
+                'network': {
+                    'floating_network_name':
+                    '${_param:runtest_tempest_public_net}'
+                },
+                'share': {
+                    'capability_snapshot_support': True,
+                    'run_driver_assisted_migration_tests': False,
+                    'run_manage_unmanage_snapshot_tests': False,
+                    'run_manage_unmanage_tests': False,
+                    'run_migration_with_preserve_snapshots_tests': False,
+                    'run_quota_tests': True,
+                    'run_replication_tests': False,
+                    'run_snapshot_tests': True,
+                }}}}}
+
 
 class RuntestManager(object):
     """Helper manager for execution tempest via runtest-formula"""
@@ -30,117 +98,60 @@
     image_version = settings.TEMPEST_IMAGE_VERSION
     container_name = 'run-tempest-ci'
     master_host = "cfg01"
-    control_host = "ctl01"
+    master_tgt = "{}*".format(master_host)
     class_name = "runtest"
     run_cmd = '/bin/bash -c "run-tempest"'
 
-    def __init__(self, config, underlay, salt_api, cluster_name,
+    def __init__(self, underlay, salt_api, cluster_name,
                  domain_name, tempest_threads,
                  tempest_pattern=settings.TEMPEST_PATTERN,
                  run_cmd=None, target='gtw01'):
-        self.__config = config
         self.underlay = underlay
         self.__salt_api = salt_api
+        self.target = target
         self.cluster_name = cluster_name
         self.domain_name = domain_name
         self.tempest_threads = tempest_threads
         self.tempest_pattern = tempest_pattern
         self.run_cmd = run_cmd or self.run_cmd
-        self.target_name = self.underlay.get_target_node_names(target)[0]
-        self.master_name = self.underlay.get_target_node_names(
-            self.master_host)[0]
-        self.control_name = self.underlay.get_target_node_names(
-            self.control_host)[0]
 
     @property
     def salt_api(self):
         return self.__salt_api
 
-    @property
-    def runtest_pillar(self):
-        public_net = self.__config.underlay.dhcp_ranges[
-            settings.EXTERNAL_ADDRESS_POOL_NAME]
-        public_gateway = public_net["gateway"].encode("ascii")
-        public_cidr = public_net["cidr"].encode("ascii")
-        public_allocation_start = public_net["start"].encode("ascii")
-        public_allocation_end = public_net["end"].encode("ascii")
+    def install_python_lib(self):
+        return self.salt_api.local(
+            "{}*".format(self.target),
+            'pip.install', 'docker'), None
 
-        return {
-            'classes': ['service.runtest.tempest',
-                        'service.runtest.tempest.public_net',
-                        'service.runtest.tempest.services.manila.glance'],
-            'parameters': {
-                '_param': {
-                    'runtest_tempest_cfg_dir': TEMPEST_CFG_DIR,
-                    'runtest_tempest_cfg_name': 'tempest.conf',
-                    'runtest_tempest_public_net': 'public',
-                    'openstack_public_neutron_subnet_gateway': public_gateway,
-                    'openstack_public_neutron_subnet_cidr': public_cidr,
-                    'openstack_public_neutron_subnet_allocation_start':
-                        public_allocation_start,
-                    'openstack_public_neutron_subnet_allocation_end':
-                        public_allocation_end,
-                    'tempest_test_target': self.target_name.encode("ascii"),
-                },
-                'neutron': {
-                    'client': {
-                        'enabled': True
-                    }
-                },
-                'runtest': {
-                    'enabled': True,
-                    'keystonerc_node': 'ctl01*',
-                    'tempest': {
-                        'enabled': True,
-                        'cfg_dir': '${_param:runtest_tempest_cfg_dir}',
-                        'cfg_name': '${_param:runtest_tempest_cfg_name}',
-                        'DEFAULT': {
-                            'log_file': 'tempest.log'
-                        },
-                        'compute': {
-                            'build_timeout': 600,
-                            'max_microversion': 2.53,
-                            'min_compute_nodes': 2,
-                            'min_microversion': 2.1,
-                            'volume_device_name': 'vdc'
-                        },
-                        'convert_to_uuid': {
-                            'network': {
-                                'public_network_id':
-                                '${_param:runtest_tempest_public_net}'
-                            }
-                        },
-                        'dns_feature_enabled': {
-                            'api_admin': False,
-                            'api_v1': False,
-                            'api_v2': True,
-                            'api_v2_quotas': True,
-                            'api_v2_root_recordsets': True,
-                            'bug_1573141_fixed': True
-                        },
-                        'heat_plugin': {
-                            'floating_network_name':
-                            '${_param:runtest_tempest_public_net}'
-                        },
-                        'network': {
-                            'floating_network_name':
-                            '${_param:runtest_tempest_public_net}'
-                        },
-                        'share': {
-                            'capability_snapshot_support': True,
-                            'run_driver_assisted_migration_tests': False,
-                            'run_manage_unmanage_snapshot_tests': False,
-                            'run_manage_unmanage_tests': False,
-                            'run_migration_with_preserve_snapshots_tests':
-                                False,
-                            'run_quota_tests': True,
-                            'run_replication_tests': False,
-                            'run_snapshot_tests': True,
-                        }}}}}
+    def run_salt_minion_state(self):
+        return self.salt_api.local('cfg01*', 'state.sls', 'salt.minion')
+
+    def create_networks(self):
+        return self.salt_api.local('cfg01*', 'state.sls', 'neutron.client')
+
+    def create_flavors(self):
+        return self.salt_api.local('cfg01*', 'state.sls', 'nova.client')
+
+    def set_property(self):
+        return self.salt_api.local(
+            tgt='ctl01*',
+            fun='cmd.run',
+            args='. /root/keystonercv3; openstack '
+                 'flavor set m1.tiny_test  '
+                 '--property hw:mem_page_size=small')
+
+    def create_cirros(self):
+        return self.salt_api.local('cfg01*', 'state.sls', 'glance.client')
+
+    def generate_config(self):
+        return self.salt_api.local('cfg01*', 'state.sls', 'runtest')
 
     def fetch_arficats(self, username=None, file_format='xml'):
-        with self.underlay.remote(node_name=self.target_name,
-                                  username=None) as tgt:
+        target_name = next(node_name for node_name
+                           in self.underlay.node_names() if
+                           self.target in node_name)
+        with self.underlay.remote(node_name=target_name, username=None) as tgt:
             result = tgt.execute('find {} -name "report_*.{}"'.format(
                 TEMPEST_CFG_DIR, file_format))
             LOG.debug("Find result {0}".format(result))
@@ -152,38 +163,42 @@
                 destination=report,  # noqa
                 target=os.getcwd())
 
-    def store_runtest_model(self, runtest_pillar=None):
+    def store_runtest_model(self, config=CONFIG):
+        master_name = next(node_name for node_name
+                           in self.underlay.node_names() if
+                           self.master_host in node_name)
         with self.underlay.yaml_editor(
                 file_path="/srv/salt/reclass/classes/cluster/"
                           "{cluster_name}/infra/"
                           "{class_name}.yml".format(
                               cluster_name=self.cluster_name,
                               class_name=self.class_name),
-                node_name=self.master_name) as editor:
-            editor.content = runtest_pillar or self.runtest_pillar
+                node_name=master_name) as editor:
+            editor.content = config
         with self.underlay.yaml_editor(
                 file_path="/srv/salt/reclass/nodes/_generated/"
                           "cfg01.{domain_name}.yml".format(
                               domain_name=self.domain_name),
-                node_name=self.master_name) as editor:
+                node_name=master_name) as editor:
             editor.content['classes'].append(
                 'cluster.{cluster_name}.infra.{class_name}'.format(
                     cluster_name=self.cluster_name,
                     class_name=self.class_name))
 
+        self.salt_api.local('*', 'saltutil.refresh_pillar')
+        self.salt_api.local('*', 'saltutil.sync_all')
+
     def save_runtime_logs(self, logs=None, inspect=None):
         if logs:
             with open("{path}/{target}_tempest_run.log".format(
-                    path=settings.LOGS_DIR,
-                    target=self.target_name), 'w') as f:
+                    path=settings.LOGS_DIR, target=self.target), 'w') as f:
                 LOG.info("Save tempest console log")
                 container_log = logs
                 f.write(container_log.encode('ascii', 'ignore'))
 
         if inspect:
             with open("{path}/{target}_tempest_container_info.json.log".format(
-                    path=settings.LOGS_DIR,
-                    target=self.target_name), 'w') as f:
+                    path=settings.LOGS_DIR, target=self.target), 'w') as f:
                 LOG.info("Save tempest container inspect data")
 
                 container_inspect = json.dumps(inspect,
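
store_runtest_model() above relies on the underlay yaml_editor context manager: it loads a YAML file from the remote node into editor.content and writes the (possibly replaced) content back on exit. A minimal local stand-in for that pattern (hypothetical YamlEditor class, not the tcp-qa implementation):

    import yaml

    class YamlEditor(object):
        """Load YAML on enter, dump the edited content back on exit."""
        def __init__(self, path):
            self.path = path
            self.content = None

        def __enter__(self):
            with open(self.path) as f:
                self.content = yaml.safe_load(f) or {}
            return self

        def __exit__(self, *exc_info):
            if not any(exc_info):
                with open(self.path, 'w') as f:
                    yaml.safe_dump(self.content, f,
                                   default_flow_style=False)

    # Usage mirrors store_runtest_model():
    # with YamlEditor('runtest.yml') as editor:
    #     editor.content = CONFIG
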
@@ -193,123 +208,104 @@
     def prepare(self, dpdk=None):
         self.store_runtest_model()
 
-        salt_cmd = "salt -l info --hard-crash --state-output=mixed "
-        salt_call_cmd = "salt-call -l info --hard-crash --state-output=mixed "
-        commands = [
-            {
-                'description': "Sync salt objects for runtest model",
-                'node_name': self.master_name,
-                'cmd': ("set -ex;" +
-                        salt_cmd + "'*' saltutil.refresh_pillar && " +
-                        salt_cmd + "'*' saltutil.sync_all")},
-            {
-                'description': ("Install docker.io package and "
-                                "enable packets forwarding"),
-                'node_name': self.target_name,
-                'cmd': ("set -ex;" +
-                        salt_call_cmd + " pkg.install docker.io && " +
-                        " iptables --policy FORWARD ACCEPT")},
-            {
-                'description': "Install PyPI docker package",
-                'node_name': self.target_name,
-                'cmd': ("set -ex;" +
-                        salt_call_cmd + " pip.install setuptools && " +
-                        salt_call_cmd + " pip.install docker")},
-            {
-                'description': "Run salt.minion state for runtest formula",
-                'node_name': self.master_name,
-                'cmd': ("set -ex;" +
-                        salt_call_cmd + " state.sls salt.minion && "
-                        " sleep 20")},
-            {
-                'description': "Enforce keystone state for neutronv2",
-                'node_name': self.master_name,
-                'cmd': ("set -ex;" +
-                        salt_call_cmd + " state.sls keystone.client")},
-            {
-                'description': "Create networks for Tempest tests",
-                'node_name': self.master_name,
-                'cmd': ("set -ex;" +
-                        salt_call_cmd + " state.sls neutron.client")},
-            {
-                'description': "Create flavors for Tempest tests",
-                'node_name': self.master_name,
-                'cmd': ("set -ex;" +
-                        salt_call_cmd + " state.sls nova.client")},
-            {
-                'description': "Create cirros image for Tempest",
-                'node_name': self.master_name,
-                'cmd': ("set -ex;" +
-                        salt_call_cmd + " state.sls glance.client")},
-            {
-                'description': "Generate config for Tempest",
-                'node_name': self.master_name,
-                'cmd': ("set -ex;" +
-                        salt_call_cmd + " state.sls runtest")},
-        ]
+        res = self.install_python_lib()
+        LOG.info(json.dumps(res, indent=4))
 
+        res = self.run_salt_minion_state()
+        LOG.info(json.dumps(res, indent=4))
+        time.sleep(20)
+
+        res = self.create_networks()
+        LOG.info(json.dumps(res, indent=4))
+        time.sleep(20)
+
+        res = self.create_flavors()
+        LOG.info(json.dumps(res, indent=4))
+        time.sleep(20)
         if dpdk:
-            commands.append({
-                'description': "Configure flavor for DPDK",
-                'node_name': self.control_name,
-                'cmd': ("set -ex;" +
-                        salt_call_cmd + " cmd.run "
-                        " '. /root/keystonercv3;"
-                        "  openstack flavor set m1.tiny_test"
-                        "  --property hw:mem_page_size=small'")},
-            )
+            res = self.set_property()
+            LOG.info('Update flavor property')
+            LOG.info(json.dumps(res, indent=4))
+            time.sleep(20)
 
-        self.__salt_api.execute_commands(commands=commands,
-                                         label="Prepare for Tempest")
+        res = self.create_cirros()
+        LOG.info(json.dumps(res, indent=4))
+        time.sleep(20)
+
+        res = self.generate_config()
+        LOG.info(json.dumps(res, indent=4))
+        time.sleep(20)
 
     def run_tempest(self, timeout=600):
-        tgt = self.target_name
-        image_nameversion = "{}:{}".format(self.image_name, self.image_version)
+        tgt = "{}*".format(self.target)
+        params = {
+            "name": self.container_name,
+            "image": "{}:{}".format(self.image_name, self.image_version),
+            "environment": {
+                "ARGS": "-r {tempest_pattern} -w "
+                        "{tempest_threads} ".format(
+                            tempest_pattern=self.tempest_pattern,
+                            tempest_threads=self.tempest_threads)  # noqa
+            },
+            "binds": [
+                "{cfg_dir}/tempest.conf:/etc/tempest/tempest.conf".format(cfg_dir=TEMPEST_CFG_DIR),  # noqa
+                "/tmp/:/tmp/",
+                "{cfg_dir}:/root/tempest".format(cfg_dir=TEMPEST_CFG_DIR),
+                "/etc/ssl/certs/:/etc/ssl/certs/"
+            ],
+            "auto_remove": False,
+            "cmd": self.run_cmd
+        }
 
-        docker_args = (
-            " --name {container_name} "
-            " -e ARGS=\"-r {tempest_pattern} -w {tempest_threads}\""
-            " -v {cfg_dir}/tempest.conf:/etc/tempest/tempest.conf"
-            " -v /tmp/:/tmp/"
-            " -v {cfg_dir}:/root/tempest"
-            " -v /etc/ssl/certs/:/etc/ssl/certs/"
-            " --rm"
-            " {image_nameversion} {run_cmd}"
-            .format(
-                container_name=self.container_name,
-                image_nameversion=image_nameversion,
-                tempest_pattern=self.tempest_pattern,
-                tempest_threads=self.tempest_threads,
-                cfg_dir=TEMPEST_CFG_DIR,
-                run_cmd=self.run_cmd,
-            ))
+        res = self.salt_api.local(tgt, 'dockerng.pull', "{}:{}".format(
+            self.image_name, self.image_version))
+        LOG.info("Tempest image has beed pulled- \n{}".format(
+            json.dumps(res, indent=4)))
 
-        commands = [
-            {
-                'description': "Run Tempest tests {0}".format(
-                    image_nameversion),
-                'node_name': self.target_name,
-                'cmd': ("set -ex;" +
-                        " docker rm --force {container_name} || true;"
-                        " docker run {docker_args}"
-                        .format(container_name=self.container_name,
-                                docker_args=docker_args)),
-                'timeout': timeout},
-        ]
+        res = self.salt_api.local(tgt, 'dockerng.create', kwargs=params)
+        LOG.info("Tempest container has been created - \n{}".format(
+            json.dumps(res, indent=4)))
 
-        self.__salt_api.execute_commands(commands=commands,
-                                         label="Run Tempest tests")
+        res = self.salt_api.local(tgt, 'dockerng.start', self.container_name)
+        LOG.info("Tempest container has been started - \n{}".format(
+            json.dumps(res, indent=4)))
+
+        def wait_status(s):
+            inspect_res = self.salt_api.local(tgt,
+                                              'dockerng.inspect',
+                                              self.container_name)
+            if 'return' in inspect_res:
+                inspect = inspect_res['return']
+                inspect = inspect[0]
+                inspect = next(inspect.iteritems())[1]
+                status = inspect['State']['Status']
+
+                return status.lower() == s.lower()
+
+            return False
+
+        helpers.wait(lambda: wait_status('exited'),
+                     timeout=timeout,
+                     timeout_msg=('Tempest run did not finish '
+                                  'in {}'.format(timeout)))
 
         inspect_res = self.salt_api.local(tgt,
                                           'dockerng.inspect',
                                           self.container_name)
         inspect = inspect_res['return'][0]
         inspect = next(inspect.iteritems())[1]
+        if inspect['State']['ExitCode'] != 0:
+            LOG.error("Tempest run failed")
+        LOG.info("Tempest tests have finished - \n{}".format(
+            json.dumps(inspect, indent=4)))
+
         logs_res = self.salt_api.local(tgt,
                                        'dockerng.logs',
                                        self.container_name)
         logs = logs_res['return'][0]
         logs = next(logs.iteritems())[1]
+        LOG.info("Tempest result - \n{}".format(
+            logs.encode('ascii', 'ignore')))
 
         res = self.salt_api.local(tgt, 'dockerng.rm', self.container_name)
         LOG.info("Tempest container was removed".format(
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 6fad0e4..b5d5f04 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -104,12 +104,11 @@
         self.__session_start = login()
         return self.__api
 
-    def local(self, tgt, fun, args=None, kwargs=None, timeout=None):
-        return self.api.local(tgt, fun, args, kwargs, timeout=timeout,
-                              expr_form='compound')
+    def local(self, tgt, fun, args=None, kwargs=None):
+        return self.api.local(tgt, fun, args, kwargs, expr_form='compound')
 
-    def local_async(self, tgt, fun, args=None, kwargs=None, timeout=None):
-        return self.api.local_async(tgt, fun, args, kwargs, timeout=timeout)
+    def local_async(self, tgt, fun, args=None, kwargs=None):
+        return self.api.local_async(tgt, fun, args, kwargs)
 
     def lookup_result(self, jid):
         return self.api.lookup_jid(jid)
@@ -139,27 +138,25 @@
 
         return fails if fails else None
 
-    def enforce_state(self, tgt, state, args=None, kwargs=None, timeout=None):
-        r = self.local(tgt=tgt, fun='state.sls', args=state, timeout=timeout)
+    def enforce_state(self, tgt, state, args=None, kwargs=None):
+        r = self.local(tgt=tgt, fun='state.sls', args=state)
         f = self.check_result(r)
         return r, f
 
-    def enforce_states(self, tgt, state, args=None, kwargs=None, timeout=None):
+    def enforce_states(self, tgt, state, args=None, kwargs=None):
         rets = []
         for s in state:
-            r = self.enforce_state(tgt=tgt, state=s, timeout=timeout)
+            r = self.enforce_state(tgt=tgt, state=s)
             rets.append(r)
         return rets
 
-    def run_state(self, tgt, state, args=None, kwargs=None, timeout=None):
-        return self.local(tgt=tgt, fun=state, args=args, kwargs=kwargs,
-                          timeout=timeout), None
+    def run_state(self, tgt, state, args=None, kwargs=None):
+        return self.local(tgt=tgt, fun=state, args=args, kwargs=kwargs), None
 
-    def run_states(self, tgt, state, args=None, kwargs=None, timeout=None):
+    def run_states(self, tgt, state, args=None, kwargs=None):
         rets = []
         for s in state:
-            r = self.run_state(tgt=tgt, state=s, args=args, kwargs=kwargs,
-                               timeout=timeout)
+            r = self.run_state(tgt=tgt, state=s, args=args, kwargs=kwargs)
             rets.append(r)
         return rets
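
With the timeout parameter gone, every call goes through salt-api with its default timeout, and targets are still matched as compound expressions. Typical calls against a SaltManager instance look like this (the instance name salt and the target strings are illustrative; the functions shown are the ones RuntestManager uses above):

    # run an execution module on the target minions
    res = salt.local(tgt='gtw01*', fun='pip.install', args='docker')

    # apply a state and check the returned result for failures
    ret, failed = salt.enforce_state(tgt='cfg01*', state='salt.minion')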
 
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index 9ebdf22..0d79cc4 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -85,6 +85,3 @@
 SL_TEST_REPO = os.environ.get('SL_TEST_REPO',
                               'https://github.com/Mirantis/stacklight-pytest')
 SL_TEST_COMMIT = os.environ.get('SL_TEST_COMMIT', 'master')
-
-EXTERNAL_ADDRESS_POOL_NAME = os.environ.get('EXTERNAL_ADDRESS_POOL_NAME',
-                                            'external-pool01')
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 009eb24..037dbd8 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -123,22 +123,6 @@
                    tests or during the deployment process.
                    {'pool_name1': '<cidr>', 'pool_name2': '<cidr>', ...}""",
            default={}),
-    ct.Cfg('dhcp_ranges', ct.JSONDict(),
-           help="""DHCP ranges allocated for the address pools.
-                   This is extended object comparing to 'address_pools'.
-                   May be used to determine DHCP range start/end/gateway for a
-                   specific network from tests or during the deployment
-                   process.
-                   {'pool_name1': {'cidr': 'n.n.n.n/m',
-                                   'start': 'x.x.x.x',
-                                   'end': 'y.y.y.y',
-                                   'gateway': 'z.z.z.z'},
-                    'pool_name2': {'cidr': 'n.n.n.n/m',
-                                   'start': x.x.x.x,
-                                   'end': 'y.y.y.y',
-                                   'gateway': 'z.z.z.z'},
-                    ...}""",
-           default={}),
     ct.Cfg('ssh_keys', ct.JSONList(), default=[],
            help="SSH key pair(s) for root. If the option is left empty, "
                 "then a key pair will be generated automatically"),
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml
index 084a922..612299f 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml
@@ -73,9 +73,8 @@
         net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.128/26:26') }}
         params:
           ip_reserved:
-            gateway: '172.17.42.129'
-          ip_ranges:
-              dhcp: ['172.17.42.130', '172.17.42.180']
+            gateway: +1
+            l2_network_device: -2
 
 
     groups:
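
The gateway and l2_network_device entries are now offsets relative to the address pool instead of hard-coded IPs; in the fuel-devops convention a positive offset counts from the network address and a negative one from its end (the same convention the dhcp ip_ranges elsewhere use, e.g. [+10, -10]). For the default 172.17.42.128/26 pool this resolves as follows:

    import ipaddress

    net = ipaddress.ip_network(u'172.17.42.128/26')
    addrs = list(net)
    print(addrs[1])    # +1 -> 172.17.42.129 (gateway)
    print(addrs[-2])   # -2 -> 172.17.42.190 (l2_network_device)
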
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
index 42f9b99..9e8dd5e 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
@@ -33,3 +33,53 @@
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE() }}
 
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
+
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext 172.17.42.0/26 --name net04_ext__subnet --disable-dhcp --allocation-pool start=172.17.42.10,end=172.17.42.60 --gateway 172.17.42.1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 192.168.0.0/24 --name net04__subnet --allocation-pool start=192.168.0.120,end=192.168.0.240'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
\ No newline at end of file
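
These bootstrap steps drive the legacy neutron CLI through salt cmd.run on ctl01. For reference, the same external/tenant network, router, and interface setup expressed against the REST API via openstacksdk (a sketch; the 'mcp' clouds.yaml entry is an assumption):

    import openstack

    conn = openstack.connect(cloud='mcp')

    # external flat network plus its non-DHCP subnet
    ext = conn.network.create_network(
        name='net04_ext', is_router_external=True,
        provider_physical_network='physnet1',
        provider_network_type='flat')
    conn.network.create_subnet(
        network_id=ext.id, name='net04_ext__subnet',
        ip_version=4, cidr='172.17.42.0/26', enable_dhcp=False,
        gateway_ip='172.17.42.1',
        allocation_pools=[{'start': '172.17.42.10',
                           'end': '172.17.42.60'}])

    # tenant network and subnet
    net = conn.network.create_network(name='net04')
    subnet = conn.network.create_subnet(
        network_id=net.id, name='net04__subnet',
        ip_version=4, cidr='192.168.0.0/24',
        allocation_pools=[{'start': '192.168.0.120',
                           'end': '192.168.0.240'}])

    # router with external gateway, attached to the tenant subnet
    router = conn.network.create_router(
        name='net04_router01',
        external_gateway_info={'network_id': ext.id})
    conn.network.add_interface_to_router(router, subnet_id=subnet.id)
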
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
index a7308e9..25c98bc 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
@@ -73,9 +73,8 @@
         net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.0/26:26') }}
         params:
           ip_reserved:
-            gateway: '172.17.42.1'
-          ip_ranges:
-              dhcp: ['172.17.42.10', '172.17.42.60']
+            gateway: +1
+            l2_network_device: -2
 
 
     groups:
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
index 7895eed..f7dab55 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
@@ -58,3 +58,60 @@
   retry: {count: 10, delay: 30}
   skip_fail: false
 
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext 172.17.42.128/26 --name net04_ext__subnet --disable-dhcp --allocation-pool start=172.17.42.130,end=172.17.42.180 --gateway 172.17.42.129'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04 --provider:network_type vxlan'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 10.167.12.0/24 --name net04__subnet --allocation-pool start=10.167.12.150,end=10.167.12.240'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01 --ha False'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Temporary workaround - restart br-prv on cmp nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+    'ifdown br-prv; ifup br-prv'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
index 15e22ba..23eb24c 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
@@ -73,9 +73,9 @@
         net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.128/26:26') }}
         params:
           ip_reserved:
-            gateway: '172.17.42.129'
-          ip_ranges:
-              dhcp: ['172.17.42.130', '172.17.42.180']
+            gateway: +1
+            l2_network_device: -2
+
 
     groups:
       - name: virtual
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
index 94199a0..a964d2b 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
@@ -161,7 +161,8 @@
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
-            dhcp: [+180, +220]
+            dhcp: [+10, -10]
+
 
     groups:
       - name: default
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml
index 8e9ac5a..69fbc1a 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml
@@ -61,3 +61,66 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 10, delay: 30}
   skip_fail: false
+
+- description: TMP step related to PROD-17975 for cmp
+  cmd: salt "cmp*" cmd.run ' systemctl restart openvswitch-switch.service'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: TMP step related to PROD-17975 for gtw
+  cmd: salt "gtw*" cmd.run ' systemctl restart openvswitch-switch.service'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04 --provider:network_type vxlan'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01 --ha False'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
index 0530b46..21ffdc3 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
@@ -96,4 +96,62 @@
     'ip a'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 10, delay: 30}
-  skip_fail: false
\ No newline at end of file
+  skip_fail: false
+
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: sync time
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq;  service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
index 1f778e5..8eb1d57 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
@@ -76,3 +76,61 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 10, delay: 30}
   skip_fail: false
+
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: sync time
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq;  service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
new file mode 100644
index 0000000..5e8b4d5
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
@@ -0,0 +1,232 @@
+default_context:
+  barbican_backend: dogtag
+  barbican_enabled: 'False'
+  auditd_enabled: 'False'
+  backend_network_netmask: 255.255.255.0
+  backend_network_subnet: 10.167.4.0/24
+  backend_vlan: '10'
+  backup_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpQIBAAKCAQEAuY7v++mza4e75f80GYE2iIdZ30d7yvT6Xym00iD/OxRWNtXe
+    rIh7M0X30Q0F2D3hVvPz57axTheOK3xFRVvPoIZjm3fVgwNzQmTyfAZz4TOdTtWx
+    9cye8Bo20qlRpq8wFQMSDhgRv0J1iX6LjJsr8pM1A8q3e4GYnv0DrLBZ1Iq7+T/k
+    qzzsT7PuvGvEK63J/DaE6BI73QG+0W0MvblddznwXvXLo/VlBXajWOv37YHiMFMT
+    Zap7lTvGVEyxByVEM04Bo7ABF2PEPwGrGL9nOpJ1LSxBCcryNVyZbveFF/e8A1Cj
+    178rD+W4H5p2Agr5A/y3LZpTkyhnTtWXzwT3YwIDAQABAoIBACiUNa8lgHM3j8PQ
+    d5hMRZy93M2TWGMeB9Lf0AdT5/0HiYMcakHY5vhjiLpS2sBbZ/gYCXLW5Rdq11Bz
+    MMLmPRWhzg6lui+YhZAze0PcNWM+YlxnJy/Vu7xOP0b6eDy3exBdR4mFgfwNkJ6s
+    6d+p34aA4ssdfdqokLPUKQWO21Y7UVYbht6Tv55nd3YMGXHxJ0phitf7/dFsEX9Z
+    sPSdWqkYMP2UWQBrFSjxV9Q+kE8OQ1VYDFCRa/9a5QHMrFo/0dOxLkZosTcCHM8A
+    H2RHPcKrxFWn7A3eAiA4VCvtM8RX239Bi7Gdvfl1HflSkQwBDUV8F2RZLHM2NU2T
+    EGBQcuECgYEA4ZBwZAtJIQ0R35prGLFj+drb/IKr+x2WD9WOZ83cheGSwdCRk/he
+    zZ5cCKgmSqg9cDJ4/vraoqmAlwQ4uj4e1TudgHPwdDUPuwoveIbUfUyzdIZMt0s4
+    fe61AUhEniIOi09H+E2yHz6OWSw3uA4SKkNsMT4RZc4Nag3Fo86Rrj8CgYEA0piY
+    HMYPHposfjVNM0PMU9F1lwQJMdx3a55JYgUc8cMvrsZPzvwJqrGCMNjP4lPwl/AS
+    x73yaxcxEYGiG6wDkvx+hujjyAx+sal62EB9ofJGDI7u8L2/0voW53RWvTUBsy8e
+    +xOQTewCAAYGLIJnGfEyVqEAu9IPwz3pep8xtd0CgYEAruTusDOr9SuMI0M5LQFG
+    UpHnJogvT1smYoqki0osZcZ8ozjT19aps2bJV5EBd7uxP5BzDsl0wtEIuo90aLwH
+    7i/2NIYw9/m4g78nBZ4NnkXdk0karLhvSf3PbPoa8j3X5x6G4DlmFiHL/8pwPY7z
+    eL+kYR4OIVC+R+/7wcJGZMMCgYEAqOLg0epvw53mYoxCTgNoACvw/lupOAhS6MY2
+    mVn6XVOnkKTO6fIrmmziOGQXSq0APAi2NuL4XrNpkV2BcGmhMCY3Hd/0k8CZdcax
+    km0dk1skm/ugWQYCqKIQ7irZSMESjO0UDkwhJKxI6lXqa5VkM2S/dsOFQBp0s6GZ
+    9NFn3y0CgYEAogzKchxouu4BgqHn76W0IB/XeTuiCDSGRv+IwMoghxbPoT6lO920
+    OHWoo+bX3VuxpCFkN2fFH6V8WncUrv4ItAgxGftL8h9BhMRKiatwOBAw0vG/CO2G
+    CIyvmjhIvpIdAl8i1jIJw1sn/ZVYm8+ZKy4VAqPevc3Ze7WGoMUkFyg=
+    -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5ju/76bNrh7vl/zQZgTaIh1nfR3vK9PpfKbTSIP87FFY21d6siHszRffRDQXYPeFW8/PntrFOF44rfEVFW8+ghmObd9WDA3NCZPJ8BnPhM51O1bH1zJ7wGjbSqVGmrzAVAxIOGBG/QnWJfouMmyvykzUDyrd7gZie/QOssFnUirv5P+SrPOxPs+68a8Qrrcn8NoToEjvdAb7RbQy9uV13OfBe9cuj9WUFdqNY6/ftgeIwUxNlqnuVO8ZUTLEHJUQzTgGjsAEXY8Q/AasYv2c6knUtLEEJyvI1XJlu94UX97wDUKPXvysP5bgfmnYCCvkD/LctmlOTKGdO1ZfPBPdj
+  bmk_enabled: 'False'
+  ceph_cluster_network: 10.167.4.0/24
+  ceph_enabled: 'True'
+  ceph_hyper_converged: 'False'
+  ceph_mon_node01_address: 10.167.4.66
+  ceph_mon_node01_hostname: cmn01
+  ceph_mon_node02_address: 10.167.4.67
+  ceph_mon_node02_hostname: cmn02
+  ceph_mon_node03_address: 10.167.4.68
+  ceph_mon_node03_hostname: cmn03
+  ceph_osd_backend: bluestore
+  ceph_osd_block_db_size: '10'
+  ceph_osd_bond_mode: active-backup
+  ceph_osd_count: '2'
+  ceph_osd_data_disks: /dev/vdb
+  ceph_osd_journal_or_block_db_disks: /dev/vdc
+  ceph_osd_node_count: '2'
+  ceph_osd_journal_size: '10'
+  ceph_osd_primary_first_nic: eth1
+  ceph_osd_primary_second_nic: eth2
+  ceph_osd_rack01_backend_subnet: 10.167.4
+  ceph_osd_rack01_hostname: osd
+  ceph_osd_rack01_single_subnet: 10.167.4
+  ceph_osd_single_address_ranges: 10.167.4.94-10.167.4.95
+  ceph_osd_deploy_address_ranges: 10.167.5.94-10.167.5.95
+  ceph_osd_backend_address_ranges: 10.167.4.94-10.167.4.95
+  ceph_public_network: 10.167.4.0/24
+  ceph_rgw_address: 10.167.4.75
+  ceph_rgw_hostname: rgw
+  ceph_rgw_node01_address: 10.167.4.76
+  ceph_rgw_node01_hostname: rgw01
+  ceph_rgw_node02_address: 10.167.4.77
+  ceph_rgw_node02_hostname: rgw02
+  ceph_rgw_node03_address: 10.167.4.78
+  ceph_rgw_node03_hostname: rgw03
+  ceph_version: luminous
+  cicd_enabled: 'False'
+  cluster_domain: cookied-mcp-queens-dvr-ceph.local
+  cluster_name: cookied-mcp-queens-dvr-ceph
+  compute_bond_mode: active-backup
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: tekHhhWzn3YrxKbXGMvtWYj1usHGrRBYd2gfFwWNCnRentwCu1QKANHvpIeZCRvz
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.167.4.0/24
+  control_vlan: '10'
+  cookiecutter_template_branch: 'proposed'
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  deploy_network_gateway: 10.167.5.1
+  deploy_network_netmask: 255.255.255.0
+  deploy_network_subnet: 10.167.5.0/24
+  deployment_type: physical
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.208.44
+  email_address: obutenko@mirantis.com
+  gainsight_service_enabled: 'False'
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.4.11
+  infra_kvm01_deploy_address: 10.167.5.11
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.4.12
+  infra_kvm02_deploy_address: 10.167.5.12
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.4.13
+  infra_kvm03_deploy_address: 10.167.5.13
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.167.4.10
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  internal_proxy_enabled: 'False'
+  kubernetes_ctl_on_kvm: 'False'
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 10.167.5.15
+  maas_deploy_network_name: deploy_network
+  maas_deploy_range_end: 10.167.5.230
+  maas_deploy_range_start: 10.167.5.20
+  maas_deploy_vlan: '0'
+  maas_enabled: 'False'
+  maas_fabric_name: deploy_fabric
+  maas_hostname: cfg01
+  mcp_common_scripts_branch: 'proposed'
+  mcp_version: proposed
+  no_platform: 'False'
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openssh_groups: ''
+  openstack_benchmark_node01_address: 10.167.4.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '2'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 10.167.4
+  openstack_compute_rack01_tenant_subnet: 10.167.6
+  openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.150
+  openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.150
+  openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.150
+  openstack_compute_backend_address_ranges: 10.167.6.105-10.167.6.150
+  openstack_dns_hostname: dns
+  openstack_dns_node01_address: 10.167.4.111
+  openstack_dns_node01_hostname: dns01
+  openstack_dns_node02_address: 10.167.4.112
+  openstack_dns_node02_hostname: dns02
+  openstack_control_address: 10.167.4.10
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.167.4.11
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.167.4.12
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.167.4.13
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.167.4.10
+  openstack_database_hostname: ctl
+  openstack_database_node01_address: 10.167.4.11
+  openstack_database_node01_hostname: ctl01
+  openstack_database_node02_address: 10.167.4.12
+  openstack_database_node02_hostname: ctl02
+  openstack_database_node03_address: 10.167.4.13
+  openstack_database_node03_hostname: ctl03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_address: 10.167.4.224
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node01_tenant_address: 10.167.6.6
+  openstack_gateway_node02_address: 10.167.4.225
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node02_tenant_address: 10.167.6.7
+  openstack_gateway_node03_address: 10.167.4.226
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node03_tenant_address: 10.167.6.8
+  openstack_message_queue_address: 10.167.4.10
+  openstack_message_queue_hostname: ctl
+  openstack_message_queue_node01_address: 10.167.4.11
+  openstack_message_queue_node01_hostname: ctl01
+  openstack_message_queue_node02_address: 10.167.4.12
+  openstack_message_queue_node02_hostname: ctl02
+  openstack_message_queue_node03_address: 10.167.4.13
+  openstack_message_queue_node03_hostname: ctl03
+  openstack_network_engine: ovs
+  openstack_neutron_bgp_vpn: 'False'
+  openstack_neutron_bgp_vpn_driver: bagpipe
+  openstack_neutron_qos: 'False'
+  openstack_neutron_vlan_aware_vms: 'False'
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_nova_compute_reserved_host_memory_mb: '900'
+  openstack_ovs_dvr_enabled: 'True'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 10.167.4.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.167.4.81
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.167.4.82
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 10.167.4.19
+  openstack_version: queens
+  osd_padding_with_zeros: 'False'
+  oss_enabled: 'False'
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: BX7ium4MaRPIWBdyhj4LTbiedwg3yLep
+  salt_api_password_hash: $6$qYqzkiRP$MiqA5ZMfsmdXJcuTTyeCgNPv9CBGO5nSH4HwRKPGUh0MFXcEa8JDCUEtS8xLHCkol7CMdq.l6CG7of0iaUJ.u.
+  salt_master_address: 10.167.4.15
+  salt_master_hostname: cfg01
+  salt_master_management_address: 10.167.5.15
+  shared_reclass_branch: 'proposed'
+  shared_reclass_url: https://github.com/Mirantis/reclass-system-salt-model.git
+  sriov_network_subnet: 10.55.0.0/16
+  stacklight_enabled: 'False'
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.167.6.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.167.6.0/24
+  tenant_vlan: '20'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'True'
+  version: proposed
+  vnf_onboarding_enabled: 'False'
+  rsync_fernet_rotation: 'True'
+  compute_padding_with_zeros: 'False'
+  designate_backend: bind
+  designate_enabled: 'False'
+  nova_vnc_tls_enabled: 'False'
+  galera_ssl_enabled: 'False'
+  openstack_mysql_x509_enabled: 'False'
+  rabbitmq_ssl_enabled: 'False'
+  openstack_rabbitmq_x509_enabled: 'False'
+  tenant_telemetry_enabled: 'False'
+  gnocchi_aggregation_storage: file
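Note: salt_api_password_hash above is a standard SHA-512 crypt string ($6$...). A hedged sketch for producing a matching password/hash pair when rotating the credential (mkpasswd ships in the Debian/Ubuntu 'whois' package; openssl 1.1.1+ offers the same; '<new-password>' is a placeholder):

    mkpasswd --method=sha-512 '<new-password>'
    # or:
    openssl passwd -6 '<new-password>'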
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml
new file mode 100644
index 0000000..72c6fb8
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml
@@ -0,0 +1,118 @@
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster and *01*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install glusterfs
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.service
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Setup glusterfs on primary controller
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check the gluster status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check the rabbitmq status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on first server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:master' state.sls galera
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on other servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:slave' state.sls galera -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check mysql status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+- description: Install haproxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' state.sls haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check haproxy status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.status haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Restart rsyslog
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.restart rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install memcached on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@memcached:server' state.sls memcached
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check the VIP
+  cmd: |
+    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
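Note: the "Check the VIP" step above resolves _param:openstack_control_address from pillar and greps for it on the keepalived members; exactly one node should hold the address at any time. The same check by hand, as a sketch to be run on cfg01 like the step itself:

    VIP=$(salt-call --out=newline_values_only pillar.get _param:openstack_control_address)
    salt -C 'I@keepalived:cluster' cmd.run "ip a | grep ${VIP}"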
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
new file mode 100644
index 0000000..261218c
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
@@ -0,0 +1,36 @@
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+{% import 'shared-ceph.yaml' as SHARED_CEPH with context %}
+
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MONS() }}
+
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MGR() }}
+
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_OSD_AND_RADOSGW() }}
+
+{{ SHARED_CEPH.CONNECT_CEPH_TO_SERVICES() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE() }}
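Note: the Ceph macros run before the OpenStack service macros, presumably so the RBD pools and keyrings exist when Glance/Cinder/Nova are configured. A hedged spot-check between the two phases, using the same salt targeting as these templates:

    # Expect HEALTH_OK (a tiny lab may report HEALTH_WARN) before the OpenStack macros run:
    salt -C 'I@ceph:mon' cmd.run 'ceph -s'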
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/salt.yaml
new file mode 100644
index 0000000..90ea532
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/salt.yaml
@@ -0,0 +1,30 @@
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
\ No newline at end of file
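Note: after MACRO_BOOTSTRAP_ALL_MINIONS, every node defined in underlay.yaml should be a registered, responding minion. A minimal verification sketch on cfg01:

    salt-key -L          # all expected hostnames accepted, none pending
    salt '*' test.ping   # each minion returns True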
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..da7908d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml
@@ -0,0 +1,61 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifdown ens3
+   - sudo ip r d default || true  # remove existing default route to get it from dhcp
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
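Note: the block above is cloud-init user data wrapped in a Jinja string (see the loop over config.underlay.ssh_keys). Once rendered to plain YAML it can be linted with cloud-init itself; a sketch, assuming a rendered copy at /tmp/user-data.yaml (newer cloud-init releases expose this as 'cloud-init schema'):

    cloud-init devel schema --config-file /tmp/user-data.yaml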
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml
new file mode 100644
index 0000000..979424f
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml
@@ -0,0 +1,49 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml
new file mode 100644
index 0000000..39b6c0a
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml
@@ -0,0 +1,674 @@
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
+
+{% import 'cookied-mcp-queens-dvr-ceph/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-queens-dvr-ceph') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW01 = os_env('HOSTNAME_RGW01', 'rgw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW02 = os_env('HOSTNAME_RGW02', 'rgw02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW03 = os_env('HOSTNAME_RGW03', 'rgw03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd2.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'cookied-mcp-queens-dvr-ceph_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +11
+            default_{{ HOSTNAME_CTL02 }}: +12
+            default_{{ HOSTNAME_CTL03 }}: +13
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
+            default_{{ HOSTNAME_CMN01 }}: +96
+            default_{{ HOSTNAME_CMN02 }}: +97
+            default_{{ HOSTNAME_CMN03 }}: +98
+            default_{{ HOSTNAME_RGW01 }}: +76
+            default_{{ HOSTNAME_RGW02 }}: +77
+            default_{{ HOSTNAME_RGW03 }}: +78
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+70, -10]
+
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +11
+            default_{{ HOSTNAME_CTL02 }}: +12
+            default_{{ HOSTNAME_CTL03 }}: +13
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
+            default_{{ HOSTNAME_CMN01 }}: +96
+            default_{{ HOSTNAME_CMN02 }}: +97
+            default_{{ HOSTNAME_CMN03 }}: +98
+            default_{{ HOSTNAME_RGW01 }}: +76
+            default_{{ HOSTNAME_RGW02 }}: +77
+            default_{{ HOSTNAME_RGW03 }}: +78
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+70, -10]
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +11
+            default_{{ HOSTNAME_CTL02 }}: +12
+            default_{{ HOSTNAME_CTL03 }}: +13
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
+            default_{{ HOSTNAME_CMN01 }}: +96
+            default_{{ HOSTNAME_CMN02 }}: +97
+            default_{{ HOSTNAME_CMN03 }}: +98
+            default_{{ HOSTNAME_RGW01 }}: +76
+            default_{{ HOSTNAME_RGW02 }}: +77
+            default_{{ HOSTNAME_RGW03 }}: +78
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +11
+            default_{{ HOSTNAME_CTL02 }}: +12
+            default_{{ HOSTNAME_CTL03 }}: +13
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
+            default_{{ HOSTNAME_CMN01 }}: +96
+            default_{{ HOSTNAME_CMN02 }}: +97
+            default_{{ HOSTNAME_CMN03 }}: +98
+            default_{{ HOSTNAME_RGW01 }}: +76
+            default_{{ HOSTNAME_RGW02 }}: +77
+            default_{{ HOSTNAME_RGW03 }}: +78
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+130, +230]
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+          tenant: tenant-pool01
+          external: external-pool01
+
+        l2_network_devices:
+          private:
+            address_pool: private-pool01
+            dhcp: false
+            forward:
+              mode: route
+
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          tenant:
+            address_pool: tenant-pool01
+            dhcp: false
+
+          external:
+            address_pool: external-pool01
+            dhcp: true
+            forward:
+              mode: route
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+         - name: mcp_ubuntu_1604_image           # Pre-configured image for control plane
+           source_image: !os_env MCP_IMAGE_PATH1604
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: &interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config: &network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMN01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMN02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMN03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_OSD01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: ceph
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_OSD02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: ceph
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_RGW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_RGW02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_RGW03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMP01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: &all_interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: tenant
+                  interface_model: *interface_model
+                - label: ens6
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config: &all_network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - tenant
+                ens6:
+                  networks:
+                    - external
+
+          - name: {{ HOSTNAME_CMP02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
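Note: ip_reserved/ip_ranges offsets are relative to the pool's network address, and negative values are assumed to count back from its end. A worked example with the PRIVATE_ADDRESS_POOL01 default of 10.60.0.0/16:24 (devops carves a /24 out of the /16):

    # gateway: +1          -> 10.60.0.1
    # default_cfg01: +15   -> 10.60.0.15
    # default_cmp01: +105  -> 10.60.0.105
    # dhcp: [+70, -10]     -> 10.60.0.70 .. 10.60.0.245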
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml
new file mode 100644
index 0000000..f279b44
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml
@@ -0,0 +1,172 @@
+nodes:
+    cfg01.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl01.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - infra_kvm
+      - openstack_control_leader
+      - openstack_database_leader
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl02.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - infra_kvm
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl03.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - infra_kvm
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    prx01.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    # Generator-based computes. For compatibility only
+    cmp<<count>>.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: openstack_compute_rack01
+      roles:
+      - openstack_compute
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: bond0_ab_ovs_vxlan_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
+
+    gtw01.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: bond0_ab_ovs_vxlan_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
+
+    osd<<count>>.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: ceph_osd_rack01
+      roles:
+      - ceph_osd
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cmn01.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: ceph_mon_node01
+      roles:
+      - ceph_mon
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cmn02.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: ceph_mon_node02
+      roles:
+      - ceph_mon
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cmn03.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: ceph_mon_node03
+      roles:
+      - ceph_mon
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    rgw01.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: ceph_rgw_node01
+      roles:
+      - ceph_rgw
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    rgw02.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: ceph_rgw_node02
+      roles:
+      - ceph_rgw
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    rgw03.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: ceph_rgw_node03
+      roles:
+      - ceph_rgw
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml
new file mode 100644
index 0000000..e5a1623
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml
@@ -0,0 +1,57 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-queens-dvr-ceph' %}
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-dvr-ceph.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['vcp-context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+
+    # Start compute node addresses from .105, as in the static models
+    sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Temporary workaround, reduce Ceph pg_num/pgp_num for this small lab (fix or debug properly later)
+  cmd: |
+    sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+    sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
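Note: the pg_num workaround above fits the upstream Ceph placement-group rule of thumb to this two-OSD lab: with 2 OSDs there is headroom for only about 100 PGs across all pools, so 128 per pool would trip the too-many-PGs warning, while 4 per pool stays well inside the budget.

    # Rule of thumb (upstream guidance, not asserted by this patch):
    #   total_pgs ~= osds * 100 / pool_size
    #   2 OSDs, size 2 -> ~100 PGs total; many pools * 128 each >> 100 -> use 4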
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
index e2573e8..e55c9f8 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
@@ -168,3 +168,4 @@
   skip_fail: false
 
 {{ BACKUP.MACRO_BACKUP_CEPH() }}
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
index a6e94e5..18f18ee 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
@@ -61,6 +61,62 @@
   retry: {count: 10, delay: 30}
   skip_fail: false
 
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Sync time
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq; service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
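
Each step above wraps a CLI call in `salt ... 'ctl01*' cmd.run '<command>'`, sourcing /root/keystonercv3 for credentials; the same net04_ext/net04/router sequence is repeated in the other virtual-mcp-pike templates below. The `neutron` client these steps use has long been deprecated in favour of the unified `openstack` client. For comparison, a minimal openstacksdk sketch of the same topology; the templates parameterize the prefixes as {{ IPV4_NET_EXTERNAL_PREFIX }} and {{ IPV4_NET_TENANT_PREFIX }}, so the literal CIDRs below are taken from the test config further down (external-pool01 10.90.0.0/24, tenant-pool01 10.80.0.0/24), and `cloud='mcp'` is a hypothetical clouds.yaml entry:

    # Same topology via openstacksdk (illustrative sketch, not from this repo).
    import openstack

    conn = openstack.connect(cloud='mcp')   # hypothetical clouds.yaml entry

    ext_net = conn.network.create_network(
        name='net04_ext', is_router_external=True,
        provider_physical_network='physnet1', provider_network_type='flat')
    conn.network.create_subnet(
        network_id=ext_net.id, name='net04_ext__subnet', ip_version=4,
        cidr='10.90.0.0/24', is_dhcp_enabled=False, gateway_ip='10.90.0.1',
        allocation_pools=[{'start': '10.90.0.150', 'end': '10.90.0.180'}])

    net = conn.network.create_network(name='net04')
    subnet = conn.network.create_subnet(
        network_id=net.id, name='net04__subnet', ip_version=4,
        cidr='10.80.0.0/24',
        allocation_pools=[{'start': '10.80.0.120', 'end': '10.80.0.240'}])

    router = conn.network.create_router(
        name='net04_router01',
        external_gateway_info={'network_id': ext_net.id})
    conn.network.add_interface_to_router(router, subnet_id=subnet.id)

Two side notes on the repeated block: the tenant allocation-pool end differs between templates (.240 in most, .180 in virtual-mcp-pike-dvr and virtual-mcp-pike-ovs), which is worth unifying if unintentional; and the closing "Sync time" step force-steps the clock on every minion with `ntpd -gq`, presumably because OpenStack token validation is sensitive to clock skew across nodes.
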
 {{ BACKUP.MACRO_WR_NGINX_MASTER() }}
 {{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
 {{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
index e487cff..1d04d4d 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
@@ -120,6 +120,62 @@
   retry: {count: 10, delay: 30}
   skip_fail: false
 
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Sync time
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq; service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: Install manila-api on first node
   cmd: |
     salt -C 'I@manila:api and *01*' state.sls manila.api;
@@ -179,3 +235,5 @@
   retry: {count: 3, delay: 5}
   skip_fail: false
 
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
+
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
index c06bbfd..ceffe32 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
@@ -103,6 +103,62 @@
   retry: {count: 10, delay: 30}
   skip_fail: false
 
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Sync time
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq; service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: Install manila-api on first node
   cmd: |
     salt -C 'I@manila:api and *01*' state.sls manila.api;
@@ -162,3 +218,5 @@
   retry: {count: 3, delay: 5}
   skip_fail: false
 
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
+
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
index 78126ea..db79c12 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
@@ -98,6 +98,62 @@
   retry: {count: 10, delay: 30}
   skip_fail: false
 
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.180'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Sync time
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq; service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: Install manila-api on first node
   cmd: |
     salt -C 'I@manila:api and *01*' state.sls manila.api;
@@ -159,4 +215,5 @@
 
 {{ BACKUP.MACRO_WR_NGINX_MASTER() }}
 {{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
-{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
\ No newline at end of file
+{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
index 887d5eb..c97a270 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
@@ -167,3 +167,5 @@
   retry: {count: 2, delay: 5}
   skip_fail: false
 
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
+
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
index ee15340..1387114 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
@@ -59,3 +59,61 @@
   retry: {count: 10, delay: 30}
   skip_fail: false
 
+
+  # Create networks and router for tests
+
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Sync time
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq; service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
index 930d019..54514ff 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
@@ -124,6 +124,62 @@
   retry: {count: 10, delay: 30}
   skip_fail: false
 
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.180'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Sync time
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq; service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: Install manila-api on first node
   cmd: |
     salt -C 'I@manila:api and *01*' state.sls manila.api;
@@ -185,4 +241,5 @@
 
 {{ BACKUP.MACRO_WR_NGINX_MASTER() }}
 {{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
-{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
\ No newline at end of file
+{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/tests/unit/test_yaml_templates.py b/tcp_tests/tests/unit/test_yaml_templates.py
index a9f10fb..54eb5b3 100644
--- a/tcp_tests/tests/unit/test_yaml_templates.py
+++ b/tcp_tests/tests/unit/test_yaml_templates.py
@@ -29,16 +29,6 @@
     "tenant-pool01": "10.80.0.0/24",
     "external-pool01": "10.90.0.0/24"
 }
-config.underlay.dhcp_ranges = {
-    "admin-pool01": {"cidr": "10.70.0.0/24",
-                     "start": "10.70.0.10",
-                     "end": "10.70.0.200",
-                     "gateway": "10.70.0.1"},
-    "external-pool01": {"cidr": "10.90.0.0/24",
-                        "start": "10.90.0.10",
-                        "end": "10.90.0.200",
-                        "gateway": "10.90.0.1"},
-}
 config.underlay.ssh_keys = [
     {"public": "AAAARRRGGHHHhh", "private": "--- BLABLA-KEY ---"}
 ]
diff --git a/tcp_tests/utils/get_jenkins_job_stages.py b/tcp_tests/utils/get_jenkins_job_stages.py
index 361b8d1..143e1a2 100755
--- a/tcp_tests/utils/get_jenkins_job_stages.py
+++ b/tcp_tests/utils/get_jenkins_job_stages.py
@@ -15,7 +15,6 @@
 import argparse
 import os
 import sys
-import time
 
 sys.path.append(os.getcwd())
 try:
@@ -108,17 +107,8 @@
                                 for line in log["text"].splitlines()))
         return res
 
-    for _ in range(3):
-        wf = jenkins.get_workflow(opts.job_name, opts.build_number)
-        info = jenkins.build_info(opts.job_name, int(wf['id']))
-        if info is not None:
-            break
-        time.sleep(3)
-
-    if not info:
-        raise("Cannot get info for the job {0}:{1}".format(opts.job_name,
-                                                           opts.build_number))
-
+    wf = jenkins.get_workflow(opts.job_name, opts.build_number)
+    info = jenkins.build_info(opts.job_name, int(wf['id']))
     build_description = ("[" + info['fullDisplayName'] + "] " +
                          info['url'] + " : " + info['result'])
     stages = get_stages(wf['stages'], 0)
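
Dropping the polling loop here also removes a latent bug: `raise("...")` tries to raise a plain string, which is itself a TypeError in modern Python 2 and in Python 3. If the retry behaviour is ever wanted back, a correct sketch (reusing the `jenkins` client already constructed above) could look like this:

    # Correct form of the removed retry loop, for reference (sketch only).
    import time

    def get_build_info(jenkins, job_name, build_number, attempts=3, delay=3):
        for _ in range(attempts):
            wf = jenkins.get_workflow(job_name, build_number)
            info = jenkins.build_info(job_name, int(wf['id']))
            if info is not None:
                return wf, info
            time.sleep(delay)
        raise RuntimeError("Cannot get info for the job {0}:{1}"
                           .format(job_name, build_number))
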