Remove old virtual templates

Change-Id: Id7b6266e71cd04822f64aa20162a9c7546e8dd46
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/ceph.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/ceph.yaml
deleted file mode 100644
index 8705250..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/ceph.yaml
+++ /dev/null
@@ -1,162 +0,0 @@
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-
-# Install ceph mons
-- description: Update grains
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:common' state.sls salt.minion.grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 20}
-  skip_fail: false
-
-- description: Generate keyrings
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' state.sls ceph.mon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Sync grains on ceph mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:mon' saltutil.sync_grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Update mine on ceph mons
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' mine.update
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install ceph mon
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:mon' state.sls ceph.mon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install ceph mgr if defined (needed only for Luminous)
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' state.sls ceph.mgr
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Install ceph osd
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:osd' state.sls ceph.osd
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Sync grains
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:osd' saltutil.sync_grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install ceph osd nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:osd' state.sls ceph.osd.custom
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Sync grains
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:osd' saltutil.sync_grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Update mine on ceph osd
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:osd' mine.update
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Setup pools, keyrings and maybe crush
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:setup' state.sls ceph.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install ceph client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:setup' state.sls ceph.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install radosgw if defined
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' saltutil.sync_grains;
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' state.sls ceph.radosgw;
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keystone:client' state.sls keystone.client;
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Install ceph client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:setup' state.sls ceph.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Connect ceph to glance
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-api;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Connect ceph to cinder and nova
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls nova;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Restart cinder volume
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@cinder:controller' service.restart cinder-volume;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Restart nova-compute
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nova:compute' service.restart nova-compute;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Convert cirros image
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    'qemu-img convert cirros-0.3.4-i386-disk.img cirros.raw'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Register image in glance
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; glance --timeout 120 image-create --name "cirros" --disk-format raw --container-format bare --visibility public --file cirros.raw'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/core.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/core.yaml
deleted file mode 100644
index 464cf82..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/core.yaml
+++ /dev/null
@@ -1,124 +0,0 @@
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-- description: Remove apparmor
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    '*' cmd.run 'service apparmor stop; service apparmor teardown; update-rc.d -f apparmor remove; apt-get -y remove apparmor'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml
deleted file mode 100644
index 827b16a..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml
+++ /dev/null
@@ -1,278 +0,0 @@
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@glance:server' state.sls glance -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 15}
-  skip_fail: false
-
-- description: Restart apache due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-- description: Check apache status due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to exist)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glance:server' state.sls glusterfs.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Populate keystone services/tenants/admins
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:client' state.sls keystone.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check keystone service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check glance image-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; glance image-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install nova on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nova:controller' state.sls nova -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check nova service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; nova --debug service-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-
-- description: Install cinder
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@cinder:controller' state.sls cinder -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check cinder list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; cinder list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install neutron service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:server' state.sls neutron -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install neutron on gtw node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:gateway' state.sls neutron
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check neutron agent-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; neutron agent-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@heat:server' state.sls heat -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 10}
-  skip_fail: false
-
-
-- description: Deploy horizon dashboard
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@horizon:server' state.sls horizon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Deploy nginx proxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Re-apply (as in doc) formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check IP on computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
-    'ip a'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 10, delay: 30}
-  skip_fail: false
-
-
-  # Upload cirros image
-
-- description: Upload cirros image on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Create net04_external
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create subnet_external
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create net04
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron net-create net04'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create subnet_net04
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create router
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-create net04_router01'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set gateway
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Add interface
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Allow all tcp
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Allow all icmp
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Sync time
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
-    'service ntp stop; ntpd -gq;  service ntp start'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install docker.io on gtw
-  cmd: salt-call cmd.run 'apt-get install docker.io -y'
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Enable forward policy
-  cmd: iptables --policy FORWARD ACCEPT
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create rc file on cfg
-  cmd: scp ctl01:/root/keystonercv3 /root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Copy rc file
-  cmd: scp /root/keystonercv3 gtw01:/root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/run_test.sh b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/run_test.sh
deleted file mode 100755
index 612cd47..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/run_test.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-. /home/jenkins/fuel-devops30/bin/activate
-pip install -r ./tcp_tests/requirements.txt -U
-pip install psycopg2
-
-export ENV_NAME=virtual-mcp-ocata-ceph-offline
-export VENV_PATH=/home/jenkins/fuel-devops30
-export IMAGE_PATH1604=/home/jenkins/images/xenial-server-cloudimg-amd64.qcow2
-export SHUTDOWN_ENV_ON_TEARDOWN=false
-export PYTHONIOENCODING=UTF-8
-export LAB_CONFIG_NAME=virtual-mcp-ocata-ceph-offline
-export CLUSTER_NAME=virtual-mcp-ocata-ovs-ceph-local
-export REPOSITORY_SUITE=stable
-export DISTROS_CODENAME=xenial
-export SALT_VERSION=2017.7
-
-export TEST_GROUP=test_ocata_ceph_all_ovs_install
-export RUN_TEMPEST=true
-
-# Offline deploy parameters
-export SALT_MODELS_REF_CHANGE=refs/changes/86/13886/9
-
-export BOOTSTRAP_TIMEOUT=1200
-
-export HOST_APT=10.170.0.242
-export HOST_SALTSTACK=10.170.0.242
-export HOST_ARCHIVE_UBUNTU=10.170.0.242
-export HOST_MIRROR_MCP_MIRANTIS=10.170.0.242
-export HOST_MIRROR_FUEL_INFRA=10.170.0.242
-export HOST_PPA_LAUNCHPAD=10.170.0.242
-
-export SALT_MODELS_SYSTEM_REPOSITORY=https://gerrit.mcp.mirantis.local.test/salt-models/reclass-system
-export SALT_FORMULAS_REPO=https://gerrit.mcp.mirantis.local.test/salt-formulas
-export FORMULA_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial ${REPOSITORY_SUITE} salt extra"
-export FORMULA_GPG="http://apt.mirantis.local.test/public.gpg"
-export SALT_REPOSITORY="deb [arch=amd64] http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/saltstack-${SALT_VERSION}/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main"
-
-export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2017.7 main"
-export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial-updates main universe restricted"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial-security main universe restricted"
-
-cd tcp_tests
-py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/salt.yaml
deleted file mode 100644
index 540acdc..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/salt.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_APT01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_PRX01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CMN01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CMN02 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_CMN03 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_OSD01 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import HOSTNAME_OSD02 with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-ocata-ceph-offline/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.local.test/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_CONTROL_PREFIX with context %}
-
-#- description: 'Generate nginx cert'
-#  cmd: |
-#    openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \
-#    -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=www.gerrit.com" \
-#    -keyout ssl-nginx.key  -out ssl-nginx.crt;
-#  node_name: {{ HOSTNAME_APT01 }}
-#  retry: {count: 1, delay: 5}
-#  skip_fail: false
-
-- description: Check nginx on APT node is ready
-  cmd: systemctl status nginx;
-  node_name: {{ HOSTNAME_APT01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check dnsmasq on APT node is ready
-  cmd: systemctl status dnsmasq;
-  node_name: {{ HOSTNAME_APT01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL03) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMN01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMN02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMN03) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_OSD01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_OSD02) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_GTW01) }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_PRX01) }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-- description: 'Workaround for local_repo_url - point to the offline image repository structure'
-  cmd: |
-    find /srv/salt/reclass/classes/cluster -type f -exec sed -i 's/local_repo_url: .*/local_repo_url: mirror.mcp.mirantis.local.test/g' {} +
-    find /srv/salt/reclass/classes/cluster -type f -exec sed -i 's/aptly_server_address: .*/aptly_server_address: {{ os_env('HOST_APT') }}/g' {} +
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml
deleted file mode 100644
index 9710531..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml
+++ /dev/null
@@ -1,115 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-   - sudo ifup ens4
-   - sudo ifup ens5
-   - sudo ifup ens6
-
-   - rm /etc/resolv.conf
-   - touch /etc/resolv.conf
-   - export LOCAL_DNS_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
-   - echo "nameserver $LOCAL_DNS_IP" >> /etc/resolv.conf;
-   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - echo "supersede domain-name-servers $LOCAL_DNS_IP, 172.18.208.44" >> /etc/dhcp/dhclient.conf
-   - export TERM=linux
-   - export LANG=C
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-
-   ############## Cloud repo01 node ##################
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - eatmydata apt-get clean && apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y salt-minion python-pip git curl tmux byobu iputils-ping traceroute htop tree ntp;
-   - openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=gerrit.mcp.mirantis.local.test" -keyout /root/ssl-nginx.key  -out /root/ssl-nginx.crt;
-   - cd /tmp;
-   - git clone https://github.com/TatyankaLeontovich/salt-formula-nginx;
-   - git clone https://github.com/TatyankaLeontovich/salt-dnsmasq;
-   - git clone https://github.com/TatyankaLeontovich/underpillar;
-   - mkdir -p /srv/pillar/;
-   - mkdir -p /srv/salt;
-   - cd /srv/salt;
-   - ln -s /tmp/salt-formula-nginx/nginx;
-   - ln -s /tmp/salt-dnsmasq/dnsmasq;
-   - cp /tmp/underpillar/pillar/*.sls /srv/pillar/;
-   - cp /tmp/underpillar/states/*.sls /srv/salt/;
-   - cp /srv/pillar/top_localdns.sls /srv/pillar/top.sls;
-   - cp /srv/salt/top_localdns.sls /srv/salt/top.sls;
-   - find /srv/pillar/ -type f -exec sed -i "s/==LOCAL_DNS_IP==/${LOCAL_DNS_IP}/g" {} +
-   - find /srv/pillar/ -type f -exec sed -i "s/==HOST_APT==/{{ os_env('HOST_APT', 'apt.mirantis.com') }}/g" {} +
-   - find /srv/pillar/ -type f -exec sed -i "s/==HOST_SALTSTACK==/{{ os_env('HOST_SALTSTACK', 'repo.saltstack.com') }}/g" {} +
-   - find /srv/pillar/ -type f -exec sed -i "s/==HOST_ARCHIVE_UBUNTU==/{{ os_env('HOST_ARCHIVE_UBUNTU', 'archive.ubuntu.com') }}/g" {} +
-   - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_MCP_MIRANTIS==/{{ os_env('HOST_MIRROR_MCP_MIRANTIS', 'mirror.mcp.mirantis.net') }}/g" {} +
-   - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_FUEL_INFRA==/{{ os_env('HOST_MIRROR_FUEL_INFRA', 'mirror.fuel-infra.org') }}/g" {} +
-   - find /srv/pillar/ -type f -exec sed -i "s/==HOST_PPA_LAUNCHPAD==/{{ os_env('HOST_PPA_LAUNCHPAD', 'ppa.launchpad.net') }}/g" {} +
-   - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.com') }}/g" {} +
-   - salt-call --local  --state-output=mixed state.sls dnsmasq;
-   - salt-call --local  --state-output=mixed state.sls nginx;
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-          auto ens4
-          iface ens4 inet dhcp
-          auto ens5
-          iface ens5 inet dhcp
-          auto ens6
-          iface ens6 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 800a0b1..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Prepare network connection
-   - sudo ifdown ens3
-   - sudo ip r d default || true  # remove existing default route to get it from dhcp
-   - sudo ifup ens3
-
-   #- sudo route add default gw {gateway} {interface_name}
-   - rm /etc/resolv.conf
-   - touch /etc/resolv.conf
-   - LOCAL_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1 | cut -d"." -f1-3)
-   - export DNS_IP=$LOCAL_IP".122"
-   - echo "nameserver $DNS_IP" >> /etc/resolv.conf;
-   - echo "nameserver $LOCAL_IP.1" >> /etc/resolv.conf;
-   - echo "supersede domain-name-servers $DNS_IP" >> /etc/dhcp/dhclient.conf
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   - mkdir -p /srv/salt/reclass/nodes
-   - systemctl enable salt-master
-   - systemctl enable salt-minion
-   - systemctl start salt-master
-   - systemctl start salt-minion
-   - salt-call -l info --timeout=120 test.ping
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data1604.yaml
deleted file mode 100644
index 5a02d24..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   - rm /etc/resolv.conf
-   - touch /etc/resolv.conf
-   - LOCAL_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1 | cut -d"." -f1-3)
-   - export DNS_IP=$LOCAL_IP".122"
-   - echo "nameserver $DNS_IP" >> /etc/resolv.conf;
-   - echo "nameserver $LOCAL_IP.1" >> /etc/resolv.conf;
-   - echo "supersede domain-name-servers $DNS_IP" >> /etc/dhcp/dhclient.conf
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   ############## TCP Cloud cfg01 node ##################
-   - echo "Preparing base OS"
-   # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml
deleted file mode 100644
index 1fd9b17..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml
+++ /dev/null
@@ -1,635 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp-ocata-ceph-offline/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-ceph-offline/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml' as CLOUDINIT_USER_DATA_APT01 with context %}
-
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_apt01 {{ CLOUDINIT_USER_DATA_APT01 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-ceph-offline') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_APT01 = os_env('HOSTNAME_APT01', 'apt01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-ceph-offline_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
-        params:
-          ip_reserved:
-            l2_network_device: +1
-            default_{{ HOSTNAME_APT01 }}: +122
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_OSD01 }}: +94
-            default_{{ HOSTNAME_OSD02 }}: +95
-            default_{{ HOSTNAME_CMN01 }}: +96
-            default_{{ HOSTNAME_CMN02 }}: +97
-            default_{{ HOSTNAME_CMN03 }}: +98
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +122
-            l2_network_device: +1
-            default_{{ HOSTNAME_APT01 }}: +122
-            default_{{ HOSTNAME_CFG01 }}: +90
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_OSD01 }}: +94
-            default_{{ HOSTNAME_OSD02 }}: +95
-            default_{{ HOSTNAME_CMN01 }}: +96
-            default_{{ HOSTNAME_CMN02 }}: +97
-            default_{{ HOSTNAME_CMN03 }}: +98
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
-        params:
-          ip_reserved:
-            l2_network_device: +1
-            default_{{ HOSTNAME_APT01 }}: +122
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_OSD01 }}: +94
-            default_{{ HOSTNAME_OSD02 }}: +95
-            default_{{ HOSTNAME_CMN01 }}: +96
-            default_{{ HOSTNAME_CMN02 }}: +97
-            default_{{ HOSTNAME_CMN03 }}: +98
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+10, -10]
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
-        params:
-          ip_reserved:
-            l2_network_device: +1
-            default_{{ HOSTNAME_APT01 }}: +122
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_OSD01 }}: +94
-            default_{{ HOSTNAME_OSD02 }}: +95
-            default_{{ HOSTNAME_CMN01 }}: +96
-            default_{{ HOSTNAME_CMN02 }}: +97
-            default_{{ HOSTNAME_CMN03 }}: +98
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+90, -10]
-
-    groups:
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
-        network_pools:
-          admin: admin-pool01
-          private: private-pool01
-          tenant: tenant-pool01
-          external: external-pool01
-
-        l2_network_devices:
-          private:
-            address_pool: private-pool01
-            dhcp: false
-            forward:
-              mode: route
-
-          admin:
-            address_pool: admin-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-          tenant:
-            address_pool: tenant-pool01
-            dhcp: false
-
-          external:
-            address_pool: external-pool01
-            dhcp: true
-            forward:
-              mode: route
-
-        group_volumes:
-         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
-           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           format: qcow2
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
-           format: qcow2
-         - name: mcp_ubuntu_1604_image           # Pre-configured image for VCP nodes, initially based on kvm nodes.
-           # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2  (preferred)
-           # or
-           # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_APT01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_apt01
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: tenant
-                  interface_model: *interface_model
-                - label: ens6
-                  l2_network_device: external
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - tenant
-                ens6:
-                  networks:
-                    - external
-
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_cfg01
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 16384
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: &interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config: &network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 16384
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CTL03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 16384
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMN01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMN02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMN03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_OSD01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_OSD02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_PRX01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMP01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-
-              interfaces: &all_interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: tenant
-                  interface_model: *interface_model
-                - label: ens6
-                  l2_network_device: external
-                  interface_model: *interface_model
-              network_config: &all_network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - tenant
-                ens6:
-                  networks:
-                    - external
-
-          - name: {{ HOSTNAME_CMP02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_GTW01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 8
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml
deleted file mode 100644
index 25ec2f0..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml
+++ /dev/null
@@ -1,162 +0,0 @@
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-
-# Install ceph mons
-- description: Update grains
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:common' state.sls salt.minion.grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Generate keyrings
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' state.sls ceph.mon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Sync grains on ceph mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:mon' saltutil.sync_grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Update mine on ceph mons
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin' mine.update
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install ceph mon
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:mon' state.sls ceph.mon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install ceph mgr if defined (needed only for Luminous)
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' state.sls ceph.mgr
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Install ceph osd
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:osd' state.sls ceph.osd
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Sync grains
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:osd' saltutil.sync_grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install ceph osd nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:osd' state.sls ceph.osd.custom
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Sync grains
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:osd' saltutil.sync_grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Update mine on ceph osd
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:osd' mine.update
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Setup pools, keyrings and maybe crush
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:setup' state.sls ceph.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install ceph client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:setup' state.sls ceph.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install radosgw if it exists
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' saltutil.sync_grains;
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' state.sls ceph.radosgw;
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keystone:client' state.sls keystone.client;
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Install ceph client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:setup' state.sls ceph.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Connect ceph to glance
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-api;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Connect ceph to cinder and nova
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls nova;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Restart cinder volume
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@cinder:controller' service.restart cinder-volume;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Restart nova-compute
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nova:compute' service.restart nova-compute;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Convert cirros image
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    'qemu-img convert cirros-0.3.4-i386-disk.img cirros.raw'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Register image in glance
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; glance --timeout 120 image-create --name "cirros" --disk-format raw --container-format bare --visibility public --file cirros.raw'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/core.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/core.yaml
deleted file mode 100644
index 70112b7..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/core.yaml
+++ /dev/null
@@ -1,116 +0,0 @@
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
deleted file mode 100644
index 4fbecca..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
+++ /dev/null
@@ -1,278 +0,0 @@
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@glance:server' state.sls glance -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 15}
-  skip_fail: false
-
-- description: Restart apache due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-- description: Check apache status due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glance:server' state.sls glusterfs.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Populate keystone services/tenants/admins
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:client' state.sls keystone.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check keystone service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check glance image-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; glance image-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install nova on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nova:controller' state.sls nova -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check nova service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; nova --debug service-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-
-- description: Install cinder
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@cinder:controller' state.sls cinder -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check cinder list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; cinder list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install neutron service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:server' state.sls neutron -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install neutron on gtw node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:gateway' state.sls neutron
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check neutron agent-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; neutron agent-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@heat:server' state.sls heat -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 10}
-  skip_fail: false
-
-
-- description: Deploy horizon dashboard
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@horizon:server' state.sls horizon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Deploy nginx proxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Re-apply (as in doc) formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check IP on computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
-    'ip a'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 10, delay: 30}
-  skip_fail: false
-
-
-  # Upload cirros image
-
-- description: Upload cirros image on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Create net04_external
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create subnet_external
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create net04
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron net-create net04'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create subnet_net04
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create router
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-create net04_router01'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set gateway
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Add interface
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Allow all tcp
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Allow all icmp
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Sync time
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
-    'service ntp stop; ntpd -gq;  service ntp start'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install docker.io on gtw
-  cmd: salt-call cmd.run 'apt-get install docker.io -y'
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Enable forward policy
-  cmd: iptables --policy FORWARD ACCEPT
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create rc file on cfg
-  cmd: scp ctl01:/root/keystonercv3 /root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Copy rc file
-  cmd: scp /root/keystonercv3 gtw01:/root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/salt.yaml
deleted file mode 100644
index 350be48..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/salt.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifdown ens3
-   - sudo ip r d default || true  # remove existing default route to get it from dhcp
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
-   - mkdir -p /srv/salt/reclass/nodes
-   - systemctl enable salt-master
-   - systemctl enable salt-minion
-   - systemctl start salt-master
-   - systemctl start salt-minion
-   - salt-call -l info --timeout=120 test.ping
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
deleted file mode 100644
index 75fffe4..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
+++ /dev/null
@@ -1,583 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp-ocata-ovs-ceph/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-ovs-ceph/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-ovs-ceph') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-ovs-ceph_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_OSD01 }}: +94
-            default_{{ HOSTNAME_OSD02 }}: +95
-            default_{{ HOSTNAME_CMN01 }}: +96
-            default_{{ HOSTNAME_CMN02 }}: +97
-            default_{{ HOSTNAME_CMN03 }}: +98
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +90
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_OSD01 }}: +94
-            default_{{ HOSTNAME_OSD02 }}: +95
-            default_{{ HOSTNAME_CMN01 }}: +96
-            default_{{ HOSTNAME_CMN02 }}: +97
-            default_{{ HOSTNAME_CMN03 }}: +98
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_OSD01 }}: +94
-            default_{{ HOSTNAME_OSD02 }}: +95
-            default_{{ HOSTNAME_CMN01 }}: +96
-            default_{{ HOSTNAME_CMN02 }}: +97
-            default_{{ HOSTNAME_CMN03 }}: +98
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+10, -10]
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_OSD01 }}: +94
-            default_{{ HOSTNAME_OSD02 }}: +95
-            default_{{ HOSTNAME_CMN01 }}: +96
-            default_{{ HOSTNAME_CMN02 }}: +97
-            default_{{ HOSTNAME_CMN03 }}: +98
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+10, -10]
-
-
-    groups:
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
-        network_pools:
-          admin: admin-pool01
-          private: private-pool01
-          tenant: tenant-pool01
-          external: external-pool01
-
-        l2_network_devices:
-          private:
-            address_pool: private-pool01
-            dhcp: false
-            forward:
-              mode: route
-
-          admin:
-            address_pool: admin-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-          tenant:
-            address_pool: tenant-pool01
-            dhcp: false
-
-          external:
-            address_pool: external-pool01
-            dhcp: true
-            forward:
-              mode: route
-
-
-        group_volumes:
-         - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
-           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           format: qcow2
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
-           format: qcow2
-         - name: mcp_ubuntu_1604_image           # Pre-configured image for VCP nodes initially based on kvm nodes.
-           # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2  (preferred)
-           # or
-           # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_cfg01
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 16384
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: &interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config: &network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 16384
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CTL03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 16384
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMN01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMN02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMN03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_OSD01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_OSD02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_PRX01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMP01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-
-              interfaces: &all_interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: tenant
-                  interface_model: *interface_model
-                - label: ens6
-                  l2_network_device: external
-                  interface_model: *interface_model
-              network_config: &all_network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - tenant
-                ens6:
-                  networks:
-                    - external
-
-          - name: {{ HOSTNAME_CMP02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_GTW01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
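
For context on the `&interfaces`/`*interfaces` and `&all_interfaces`/`*all_interfaces` pairs in the underlay above: these are plain YAML anchors and aliases, so the first node that declares a block defines it and every later node reuses it by reference. A minimal sketch of the pattern (node names are hypothetical):

```yaml
nodes:
  - name: node01
    params:
      interfaces: &ifaces        # anchor: defines the block once
        - label: ens3
          l2_network_device: admin
  - name: node02
    params:
      interfaces: *ifaces        # alias: reuses node01's block verbatim
```

This is why the compute node declares `&all_interfaces` with the extra tenant/external NICs, while cmp02 and gtw01 only carry the one-line alias.
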
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/core.yaml b/tcp_tests/templates/virtual-mcp11-dvr/core.yaml
deleted file mode 100644
index 6653d1f..0000000
--- a/tcp_tests/templates/virtual-mcp11-dvr/core.yaml
+++ /dev/null
@@ -1,124 +0,0 @@
-{% from 'virtual-mcp11-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Sync time
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
-    'service ntp stop; ntpd -gq;  service ntp start'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: true
-
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Set up glusterfs on the primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 20}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
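
Every entry in these deleted step files follows the same schema consumed by the test runner: a shell command, the node it runs on, a retry policy, and a flag controlling whether a failure aborts the run. An annotated copy of one step from the file above (the inline comments are explanatory, not part of the original):

```yaml
- description: Check the rabbitmq status        # label shown in the run log
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
  node_name: {{ HOSTNAME_CFG01 }}               # node the command executes on
  retry: {count: 1, delay: 5}                   # attempts and seconds between them
  skip_fail: false                              # false: a failure aborts the deployment
```
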
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml
deleted file mode 100644
index 16ada26..0000000
--- a/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml
+++ /dev/null
@@ -1,360 +0,0 @@
-{% from 'virtual-mcp11-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp11-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp11-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp11-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@glance:server' state.sls glance -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 15}
-  skip_fail: false
-
-- description: Restart apache due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-- description: Check apache status due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to exist)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glance:server' state.sls glusterfs.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Populate keystone services/tenants/admins
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:client' state.sls keystone.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check keystone service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check glance image-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install nova on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nova:controller' state.sls nova -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check nova service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install cinder
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@cinder:controller' state.sls cinder -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check cinder list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install neutron service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:server' state.sls neutron -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install neutron on gtw node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:gateway' state.sls neutron
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Check neutron agent-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@heat:server' state.sls heat -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 10}
-  skip_fail: false
-
-
-- description: Deploy horizon dashboard
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@horizon:server' state.sls horizon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Deploy nginx proxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check IP on computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
-    'ip a'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 10, delay: 30}
-  skip_fail: false
-
-
-  # Upload cirros image
-
-- description: Upload cirros image on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Register image in glance
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create net04_external
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create subnet_external
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create net04
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron net-create net04'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create subnet_net04
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create router
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-create net04_router01'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set gateway
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Add interface
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Allow all TCP
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Allow all ICMP
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Sync time
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
-    'service ntp stop; ntpd -gq;  service ntp start'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: true
-
-# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 02
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 03
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 01
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 02
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 03
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create the cinder-volumes volume group
-  cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install cinder-volume
-  cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install crudini
-  cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary workaround to set enabled_backends on ctl01
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary workaround to set enabled_backends on ctl02
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary workaround to set enabled_backends on ctl03
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Restart cinder volume
-  cmd: |
-    salt -C 'I@cinder:controller' service.restart cinder-volume;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Install docker.io on gtw
-  cmd: salt-call cmd.run 'apt-get install docker.io -y'
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create rc file on cfg
-  cmd: scp ctl01:/root/keystonercv3 /root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Copy rc file
-  cmd: scp /root/keystonercv3 gtw01:/root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
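
The PROD-13167 workaround above repeats the same fdisk/pvcreate/crudini commands once per controller via salt-call. Since the commands are identical on all three nodes, they could in principle be collapsed into one step run from cfg01 against 'ctl*' — a sketch under that assumption, not how the removed template actually did it:

```yaml
- description: Configure LVM backend for cinder-volume on all controllers (PROD-13167)
  cmd: |
    salt "ctl*" cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb';
    salt "ctl*" cmd.run 'pvcreate /dev/vdb1';
    salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1';
    salt "ctl*" cmd.run 'apt-get install -y cinder-volume crudini';
    salt "ctl*" cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm';
    salt -C 'I@cinder:controller' service.restart cinder-volume
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 30}
  skip_fail: false
```
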
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp11-dvr/salt.yaml
deleted file mode 100644
index b557d3a..0000000
--- a/tcp_tests/templates/virtual-mcp11-dvr/salt.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-{% from 'virtual-mcp11-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp11-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-#- description: Hack gtw node
-#  cmd: salt 'gtw*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.94/24 dev ens4; ip addr flush dev ens4";
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-#
-#- description: Hack cmp01 node
-#  cmd: salt 'cmp01*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.95/24 dev ens4; ip addr flush dev ens4";
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-#
-#- description: Hack cmp02 node
-#  cmd: salt 'cmp02*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.96/24 dev ens4; ip addr flush dev ens4";
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
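
All of the tunables in this template flow through `os_env(NAME, default)`: the first argument names an environment variable, the second supplies the fallback when it is unset. A sketch of the pattern — the first line is taken from the file above, the second introduces a hypothetical variable purely for illustration:

```yaml
{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
{% set MY_EXTRA_OPTION = os_env('MY_EXTRA_OPTION', 'disabled') %}  {# hypothetical example #}
```

So `export SALT_MODELS_REPOSITORY=<url>` before a run was enough to point the deployment at a different model repository.
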
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp11-dvr/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp11-dvr/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-dvr/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 600f6fb..0000000
--- a/tcp_tests/templates/virtual-mcp11-dvr/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifdown ens3
-   - sudo ip r d default || true  # remove existing default route to get it from dhcp
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
-   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
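
If one wanted to verify that this user-data applied cleanly, a few extra runcmd entries at the end of the list would do it. These are hypothetical additions for illustration, not part of the removed template:

```yaml
   # Hypothetical post-setup sanity checks:
   - swapon -s                                 # the 4G /swapfile should be listed
   - cat /etc/resolvconf/resolv.conf.d/base    # nameserver {gateway} appended above
   - iptables -L INPUT -n | grep 22 || true    # the temporary DROP rule should be gone
```
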
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp11-dvr/underlay--user-data1604.yaml
deleted file mode 100644
index 48e3a15..0000000
--- a/tcp_tests/templates/virtual-mcp11-dvr/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/underlay.yaml b/tcp_tests/templates/virtual-mcp11-dvr/underlay.yaml
deleted file mode 100644
index 51f1b64..0000000
--- a/tcp_tests/templates/virtual-mcp11-dvr/underlay.yaml
+++ /dev/null
@@ -1,420 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp11-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp11-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp11-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp11-dvr') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'virtual-mcp11-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +90
-            default_{{ HOSTNAME_CTL01 }}: +91
-            default_{{ HOSTNAME_CTL02 }}: +92
-            default_{{ HOSTNAME_CTL03 }}: +93
-            default_{{ HOSTNAME_CMP01 }}: +95
-            default_{{ HOSTNAME_CMP02 }}: +96
-            default_{{ HOSTNAME_GTW01 }}: +94
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +90
-            default_{{ HOSTNAME_CTL01 }}: +91
-            default_{{ HOSTNAME_CTL02 }}: +92
-            default_{{ HOSTNAME_CTL03 }}: +93
-            default_{{ HOSTNAME_CMP01 }}: +95
-            default_{{ HOSTNAME_CMP02 }}: +96
-            default_{{ HOSTNAME_GTW01 }}: +94
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+10, -10]
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+10, -10]
-
-
-    groups:
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
-        network_pools:
-          admin: admin-pool01
-          private: private-pool01
-          tenant: tenant-pool01
-          external: external-pool01
-
-        l2_network_devices:
-          private:
-            address_pool: private-pool01
-            dhcp: false
-            forward:
-              mode: route
-
-          admin:
-            address_pool: admin-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-          tenant:
-            address_pool: tenant-pool01
-            dhcp: false
-
-          external:
-            address_pool: external-pool01
-            dhcp: true
-            forward:
-              mode: route
-
-
-        group_volumes:
-         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
-           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           format: qcow2
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_cfg01
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: &interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config: &network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CTL03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_PRX01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-
-          - name: {{ HOSTNAME_CMP01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-
-              interfaces: &all_interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: tenant
-                  interface_model: *interface_model
-                - label: ens6
-                  l2_network_device: external
-                  interface_model: *interface_model
-              network_config: &all_network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - tenant
-                ens6:
-                  networks:
-                    - external
-
-          - name: {{ HOSTNAME_CMP02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_GTW01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
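
A note on the address-pool arithmetic used throughout this underlay: `net: 10.70.0.0/16:24` asks the driver to carve a free /24 subnet out of 10.70.0.0/16, and the `+N`/`-N` entries are offsets from the start (positive) or end (negative) of the allocated subnet. Assuming the driver happened to pick 10.70.1.0/24, the admin pool above would resolve roughly like this (illustrative only):

```yaml
admin-pool01:              # suppose the allocated subnet is 10.70.1.0/24
  ip_reserved:
    gateway: +1            # -> 10.70.1.1
    default_cfg01: +90     # -> 10.70.1.90  (key shortened here for readability)
    default_gtw01: +94     # -> 10.70.1.94
  ip_ranges:
    dhcp: [+90, -10]       # -> from 10.70.1.90 up to ten addresses short of the subnet end
```
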
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/core.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/core.yaml
deleted file mode 100644
index 717fccc..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/core.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
-{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Create and distribute SSL certificates for services using salt state
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls salt
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy service status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install docker
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@docker:host' state.sls docker.host
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check docker
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@docker:host' cmd.run 'docker ps'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/k8s.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/k8s.yaml
deleted file mode 100644
index e9c17ec..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/k8s.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-- description: Install etcd
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@etcd:server' state.sls etcd.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the etcd health
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@etcd:server' cmd.run '. /var/lib/etcd/configenv && etcdctl cluster-health'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Kubernetes and Calico
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons;
-     salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kubernetes:pool' state.sls kubernetes.pool;
-     salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kubernetes:pool' cmd.run 'calicoctl node status';
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-- description: Set up NAT for Calico
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@etcd:server' --subset 1 state.sls etcd.server.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Run the whole master state to check consistency
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Restart kubelet
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:pool' service.restart kubelet
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Register addons
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:master' --subset 1 state.sls kubernetes.master.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
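
A natural follow-up to the addon registration step would be a readiness check against the API server, in the same step format used above. This is a hypothetical addition, not part of the removed template:

```yaml
- description: Check that all kubernetes nodes have registered (hypothetical extra check)
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@kubernetes:master' cmd.run 'kubectl get nodes -o wide'
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 3, delay: 30}
  skip_fail: true
```
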
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/salt.yaml
deleted file mode 100644
index d16a126..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/salt.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-{% from 'virtual-mcp11-k8s-calico-minimal/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-k8s-calico-minimal/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp11-k8s-calico-minimal/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.ADJUST_K8S_OPTS() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--user-data-cfg01.yaml
deleted file mode 100644
index ba76ad8..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,93 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block SSH access while the node is being prepared
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifdown ens3
-   - sudo ip r d default || true  # remove existing default route to get it from dhcp
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-   - sudo ifdown ens4
-   - sudo ifup ens4
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
-   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - eatmydata apt-get clean && apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-          auto ens4
-          iface ens4 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--user-data1604.yaml
deleted file mode 100644
index 23b112f..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,82 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block SSH access while the node is being prepared
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-   - sudo ifup ens4
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-          auto ens4
-          iface ens4 inet dhcp
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay.yaml
deleted file mode 100644
index 2e92e1b..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay.yaml
+++ /dev/null
@@ -1,270 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp11-k8s-calico-minimal/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp11-k8s-calico-minimal/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp11-k8s-calico-minimal/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp11-k8s-calico-minimal') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'virtual-mcp11-k8s-calico-minimal_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +90
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-          ip_ranges:
-            dhcp: [+90, -10]
-
-    groups:
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
-        network_pools:
-          admin: admin-pool01
-          private: private-pool01
-
-        l2_network_devices:
-          private:
-            address_pool: private-pool01
-            dhcp: true
-
-          admin:
-            address_pool: admin-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-        group_volumes:
-         - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
-           # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2  (preferred)
-           # or
-           # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
-           format: qcow2
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 3072
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_cfg01
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL01 }}
-            role: k8s_controller
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: &interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config: &network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CTL03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMP01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: &all_interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config: &all_network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/core.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/core.yaml
deleted file mode 100644
index 1be9ba1..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/core.yaml
+++ /dev/null
@@ -1,115 +0,0 @@
-{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Create and distribute SSL certificates for services using salt state
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*'
-    state.sls salt.minion.cert
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install docker
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@docker:host' state.sls docker.host
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-- description: Check docker
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@docker:host' cmd.run 'docker ps'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description:  Install keepalived on first node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description:  Install keepalived on whole cluster
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy service status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-# Install SL core services
-#- description: Install glusterfs
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-#    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 5}
-#  skip_fail: false
-
-#- description: Setup glusterfs on primary controller
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-#    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 2, delay: 5}
-#  skip_fail: false
-
-#- description: Check the gluster status
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-#    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 5}
-#  skip_fail: false
-
-#- description: Install glusterfs client
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-#    -C 'I@glusterfs:client' state.sls glusterfs.client
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 5}
-#  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install nginx
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog for haproxy logs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/k8s.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/k8s.yaml
deleted file mode 100644
index 04b46a3..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/k8s.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
-{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Kubernetes
-- description: Install etcd
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@etcd:server' state.sls etcd.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-- description: Check the etcd health
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@etcd:server' cmd.run '. /var/lib/etcd/configenv && etcdctl cluster-health'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Kubernetes addons
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-- description: Install Kubernetes and Calico
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@kubernetes:pool' state.sls kubernetes.pool
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-- description: Set up NAT for Calico
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@etcd:server and *01*' state.sls etcd.server.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Run the whole master state to check consistency
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Register addons
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:master and *01*' state.sls kubernetes.master.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart kubelet
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:pool' service.restart kubelet
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Renew hosts file on the whole cluster
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '*' state.sls linux.network.host;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Get kubeconfig
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:control and *01*' cmd.run 'cat /etc/kubernetes/admin-kube-config && echo "Salt command execution success"'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
deleted file mode 100644
index 36a0228..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-{% set ENABLE_COMPUTES_SELF_REGISTER = os_env('ENABLE_COMPUTES_SELF_REGISTER', '') %}
-# Different templates use this mechanism.
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.ADJUST_K8S_OPTS() }}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{%- if ENABLE_COMPUTES_SELF_REGISTER != '' %}
-{{ SHARED.REGISTER_COMPUTE_NODES() }}
-{%- endif %}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml
deleted file mode 100644
index 70d9a3b..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml
+++ /dev/null
@@ -1,236 +0,0 @@
-{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-k8s-calico/salt.yaml' import ENVIRONMENT_MODEL_INVENTORY_NAME with context %}
-
-
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-# Install docker swarm
-- description: Configure docker service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install docker swarm on master node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Send grains to the swarm slave nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description:  Update mine
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description:  Refresh modules
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description:  Rerun swarm on slaves for proper token population
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description:  Configure slave nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description:  List registered Docker swarm nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install keepalived on mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'mon*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check the VIP on mon nodes
-  cmd: |
-    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
-    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-# Install StackLight v2 (slv2) infra
-# Install MongoDB for alerta
-- description: Install Mongo if target matches
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-# Create MongoDB cluster
-- description: Create MongoDB cluster if target matches
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 20}
-  skip_fail: false
-
-- description: Install telegraf
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install elasticsearch server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install kibana server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install elasticsearch client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Install kibana client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check influxdb
-  cmd: |
-    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
-    if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-# Install Prometheus LTS (optional if set in model)
-- description: Prometheus LTS (optional if set in model)
-  cmd: |
-    PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
-    if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-# Install the log collection service
-- description: Configure fluentd
-  cmd: |
-    FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "Fluentd service presence: ${FLUENTD_SERVICE}";
-    if [[ "$FLUENTD_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
-    else
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-# Install heka ceilometer collector
-- description: Install heka ceilometer collector if it exists
-  cmd: |
-    CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "Ceilometer service presence: ${CEILO}";
-    if [[ "$CEILO" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Sync modules
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Update mine
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 15}
-  skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-# Launch containers
-- description: Launch prometheus containers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Check docker ps
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
-  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Run salt minion to create cert files
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-
-{{  SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{  SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 504fd80..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifdown ens3
-   - sudo ip r d default || true  # remove existing default route to get it from dhcp
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-   - sudo ifdown ens4
-   - sudo ifup ens4
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-          auto ens4
-          iface ens4 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data1604.yaml
deleted file mode 100644
index 6fd3272..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-   - sudo ifup ens4
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-          auto ens4
-          iface ens4 inet dhcp
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
deleted file mode 100644
index 1e3df5b..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
+++ /dev/null
@@ -1,414 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp11-k8s-calico/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp11-k8s-calico/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp11-k8s-calico/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp11-k8s-calico') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'virtual-mcp11-k8s-calico_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +90
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+90, -10]
-
-    groups:
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
-        network_pools:
-          admin: admin-pool01
-          private: private-pool01
-
-        l2_network_devices:
-          private:
-            address_pool: private-pool01
-            dhcp: true
-
-          admin:
-            address_pool: admin-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-        group_volumes:
-         - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
-           # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2  (preferred)
-           # or
-           # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
-           format: qcow2
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 6144
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_cfg01
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL01 }}
-            role: k8s_controller
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: &interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config: &network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CTL03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMP01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: &all_interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config: &all_network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CMP02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_PRX01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 1024
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
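
The deleted node definitions above lean heavily on YAML anchors and aliases: the first node in each file declares an anchor such as &all_interfaces or &interfaces, and every later node reuses it via *interfaces / *network_config, so all minions share one interface layout. A minimal sketch of how a YAML loader expands this, assuming plain PyYAML and leaving out custom tags such as !os_env:

    import yaml

    doc = """
    nodes:
      - name: cmp01
        interfaces: &interfaces
          - {label: ens3, l2_network_device: admin}
          - {label: ens4, l2_network_device: private}
      - name: cmp02
        interfaces: *interfaces  # alias: reuses the anchored list
    """
    data = yaml.safe_load(doc)
    # The alias resolves to the very same list object, not a copy:
    assert data["nodes"][0]["interfaces"] is data["nodes"][1]["interfaces"]
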
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/core.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/core.yaml
deleted file mode 100644
index 840134d..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/core.yaml
+++ /dev/null
@@ -1,130 +0,0 @@
-{% from 'virtual-mcp11-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Create and distribute SSL certificates for services using salt state
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls salt
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Install docker
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@docker:host' state.sls docker.host
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check docker
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@docker:host' cmd.run 'docker ps'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install keepalived on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Check RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run "rabbitmqctl cluster_status"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-#- description: Install glusterfs
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-#    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 5}
-#  skip_fail: false
-
-#- description: Setup glusterfs on primary controller
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-#    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 2, delay: 5}
-#  skip_fail: false
-
-#- description: Check the gluster status
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-#    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 5}
-#  skip_fail: false
-
-# Install SL core services
-
-#- description: Install glusterfs client
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-#    -C 'I@glusterfs:client' state.sls glusterfs.client
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 5}
-#  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install nginx
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
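
Every step in these deleted scenario files shares one schema: description, cmd, node_name, retry {count, delay} and skip_fail. A minimal sketch of how a runner could interpret that schema, assuming retry semantics of "count attempts, delay seconds apart, and tolerate failure only when skip_fail is true"; run_on_node() is a hypothetical stand-in for the framework's SSH executor:

    import time

    def execute_step(step, run_on_node):
        # run_on_node(node_name, cmd) -> exit code (hypothetical helper)
        count = step.get('retry', {}).get('count', 1)
        delay = step.get('retry', {}).get('delay', 0)
        for _attempt in range(count):
            if run_on_node(step['node_name'], step['cmd']) == 0:
                return True
            time.sleep(delay)
        if step.get('skip_fail', False):
            return False  # tolerated failure, the scenario continues
        raise RuntimeError('Step failed: ' + step['description'])
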
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/juniper.conf b/tcp_tests/templates/virtual-mcp11-k8s-contrail/juniper.conf
deleted file mode 100644
index 398a257..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/juniper.conf
+++ /dev/null
@@ -1,110 +0,0 @@
-## Last commit: 2017-05-18 08:39:52 UTC by root
-version 12.1X46-D20.5;
-system {
-    host-name vsrx1;
-    root-authentication {
-        encrypted-password "$1$gpbfk/Jr$lF2foqHYBd/Sp56dlmkXH1"; ## SECRET-DATA
-    }
-    name-server {
-        8.8.8.8;
-        8.8.4.4;
-    }
-    services {
-        ssh;
-        web-management {
-            http {
-                interface ge-0/0/0.0;
-            }
-        }
-    }
-    syslog {
-        file messages {
-            any any;
-        }
-    }
-    license {
-        autoupdate {
-            url https://ae1.juniper.net/junos/key_retrieval;
-        }
-    }
-    ntp {
-        peer 46.243.48.4;
-        peer 147.251.48.140;
-        peer 46.243.48.88;
-    }
-}
-interfaces {
-    ge-0/0/0 {
-        unit 0 {
-            family inet {
-                address 172.16.10.90/24;
-            }
-        }
-    }
-    ge-0/0/1 {
-        unit 0 {
-            family inet {
-                address 192.168.10.90/24;
-            }
-        }
-    }
-    ge-0/0/2 {
-        unit 0 {
-            family inet {
-                address 10.70.0.91/24;
-            }
-        }
-    }
-}
-routing-options {
-    route-distinguisher-id 172.16.10.90;
-    autonomous-system 64512;
-    dynamic-tunnels {
-        dynamic_overlay_tunnels {
-            source-address 172.16.10.90;
-            gre;
-            destination-networks {
-                172.16.10.0/24;
-            }
-        }
-    }
-}
-protocols {
-    mpls {
-        interface all;
-    }
-    bgp {
-        group Contrail_Controller {
-            type internal;
-            local-address 172.16.10.90;
-            keep all;
-            family inet-vpn {
-                unicast;
-            }
-            allow 172.16.10.0/24;
-        }
-    }
-}
-security {
-    forwarding-options {
-        family {
-            mpls {
-                mode packet-based;
-            }
-        }
-    }
-}
-routing-instances {
-    public {
-        instance-type vrf;
-        interface ge-0/0/1.0;
-        vrf-target target:64512:10000;
-        vrf-table-label;
-        routing-options {
-            static {
-                route 192.168.10.0/24 discard;
-                route 0.0.0.0/0 next-hop 192.168.10.1;
-            }
-        }
-    }
-}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/juniper.conf.template b/tcp_tests/templates/virtual-mcp11-k8s-contrail/juniper.conf.template
deleted file mode 100644
index e7eed4a..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/juniper.conf.template
+++ /dev/null
@@ -1,110 +0,0 @@
-## Last commit: 2017-05-18 08:39:52 UTC by root
-version 12.1X46-D20.5;
-system {
-    host-name vsrx1;
-    root-authentication {
-        encrypted-password "$1$gpbfk/Jr$lF2foqHYBd/Sp56dlmkXH1"; ## SECRET-DATA
-    }
-    name-server {
-        8.8.8.8;
-        8.8.4.4;
-    }
-    services {
-        ssh;
-        web-management {
-            http {
-                interface ge-0/0/0.0;
-            }
-        }
-    }
-    syslog {
-        file messages {
-            any any;
-        }
-    }
-    license {
-        autoupdate {
-            url https://ae1.juniper.net/junos/key_retrieval;
-        }
-    }
-    ntp {
-        peer 46.243.48.4;
-        peer 147.251.48.140;
-        peer 46.243.48.88;
-    }
-}
-interfaces {
-    ge-0/0/0 {
-        unit 0 {
-            family inet {
-                address {{ private_address }}/24;
-            }
-        }
-    }
-    ge-0/0/1 {
-        unit 0 {
-            family inet {
-                address {{ public_address }}/24;
-            }
-        }
-    }
-    ge-0/0/2 {
-        unit 0 {
-            family inet {
-                address {{ admin_address }};
-            }
-        }
-    }
-}
-routing-options {
-    route-distinguisher-id {{ private_address }};
-    autonomous-system 64512;
-    dynamic-tunnels {
-        dynamic_overlay_tunnels {
-            source-address {{ private_address }};
-            gre;
-            destination-networks {
-                {{ private_network }}/24;
-            }
-        }
-    }
-}
-protocols {
-    mpls {
-        interface all;
-    }
-    bgp {
-        group Contrail_Controller {
-            type internal;
-            local-address 172.16.10.90;
-            keep all;
-            family inet-vpn {
-                unicast;
-            }
-            allow {{ private_network }}/24;
-        }
-    }
-}
-security {
-    forwarding-options {
-        family {
-            mpls {
-                mode packet-based;
-            }
-        }
-    }
-}
-routing-instances {
-    public {
-        instance-type vrf;
-        interface ge-0/0/1.0;
-        vrf-target target:64512:10000;
-        vrf-table-label;
-        routing-options {
-            static {
-                route {{ public_network }} discard;
-                route 0.0.0.0/0 next-hop {{ public_network_gateway }};
-            }
-        }
-    }
-}
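
juniper.conf.template is the parameterized twin of the juniper.conf removed just above: the hard-coded vSRX addresses become Jinja placeholders such as {{ private_address }} and {{ public_network_gateway }}. A minimal rendering sketch, assuming jinja2 is installed; all values below are illustrative only:

    from jinja2 import Template

    with open('juniper.conf.template') as f:
        template = Template(f.read())

    rendered = template.render(
        private_address='172.16.10.90',        # illustrative values only
        public_address='192.168.10.90',
        admin_address='10.70.0.91/24',         # used verbatim, so keep the prefix
        private_network='172.16.10.0',         # template appends /24 itself
        public_network='192.168.10.0/24',
        public_network_gateway='192.168.10.1',
    )
    print(rendered)
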
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/k8s.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/k8s.yaml
deleted file mode 100644
index 5b35289..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/k8s.yaml
+++ /dev/null
@@ -1,137 +0,0 @@
-{% from 'virtual-mcp11-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-- description: Install etcd
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@etcd:server' state.sls etcd.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the etcd health
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@etcd:server' cmd.run '. /var/lib/etcd/configenv && etcdctl cluster-health'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-  # Opencontrail Control Plane
-
-- description: Install Opencontrail db on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 20}
-  skip_fail: false
-
-- description: Install Opencontrail db on all nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:database' state.sls opencontrail.database
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 20}
-  skip_fail: false
-
-- description: Install Opencontrail control on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Opencontrail control on all nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Opencontrail on collector
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-# OpenContrail vrouters
-- description: Install Opencontrail client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Opencontrail on computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Wake up vhost0
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nova:compute' cmd.run 'exec 0>&-; exec 1>&-; exec 2>&-;
-    nohup bash -c "ip link | grep vhost && echo no_reboot || sleep 5 && reboot & "' && sleep 30
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Wait for salt-minions wake up after restart
-  cmd: salt --timeout=15 --hard-crash --state-output=mixed --state-verbose=False '*' test.ping
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 25, delay: 30}
-  skip_fail: false
-
-- description: Install Opencontrail client on computes
-  cmd: sleep 15 && salt --timeout=60 --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:compute' state.sls 'opencontrail.client'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Opencontrail on computes (second pass)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:compute' state.sls opencontrail
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-# Kubernetes
-- description: Install Kubernetes Addons
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Install Kubernetes components
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:pool' state.sls kubernetes.pool
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 60}
-  skip_fail: false
-
-- description: Run Kubernetes master without setup
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: true
-
-- description: Run Kubernetes master setup
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@kubernetes:master and *01*' state.sls kubernetes.master.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Restart Kubelet
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:pool' service.restart 'kubelet'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Renew hosts file on the whole cluster
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '*' state.sls linux.network.host;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
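
All of these steps target minions through salt's compound matcher (-C), mixing pillar checks such as I@kubernetes:pool with globs such as *01*. For reference, a minimal sketch of the same call through salt's Python API, assuming it runs on the master with the salt package importable (older releases spell the keyword expr_form instead of tgt_type):

    import salt.client

    local = salt.client.LocalClient()
    # Equivalent of: salt -C 'I@kubernetes:pool' state.sls kubernetes.pool
    result = local.cmd(
        'I@kubernetes:pool',
        'state.sls',
        ['kubernetes.pool'],
        tgt_type='compound',
    )
    for minion, ret in result.items():
        # ret holds the per-state results; inspect it for failed states
        print(minion, 'returned', len(ret), 'state results')
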
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
deleted file mode 100644
index d0844bc..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-{% from 'virtual-mcp11-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-k8s-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp11-k8s-contrail/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-
-{% set ENABLE_COMPUTES_SELF_REGISTER = os_env('ENABLE_COMPUTES_SELF_REGISTER', '') %}
-# Different templates use this mechanism.
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS(IS_CONTRAIL_LAB=true) }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.ADJUST_K8S_OPTS() }}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{%- if ENABLE_COMPUTES_SELF_REGISTER != '' %}
-{{ SHARED.REGISTER_COMPUTE_NODES() }}
-{%- endif %}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
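
The salt.yaml above reads every knob either through the os_env() Jinja helper with an inline default, or through the !os_env YAML tag seen in the underlay files. A minimal sketch of the helper's apparent semantics, assuming it is a plain environment lookup with a fallback:

    import os

    def os_env(name, default=''):
        # Mirrors the Jinja helper: environment value if set, else the default.
        return os.environ.get(name, default)

    repo = os_env('SALT_MODELS_REPOSITORY',
                  'https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab')
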
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml
deleted file mode 100644
index a5b37a2..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml
+++ /dev/null
@@ -1,249 +0,0 @@
-{% from 'virtual-mcp11-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-k8s-contrail/salt.yaml' import ENVIRONMENT_MODEL_INVENTORY_NAME with context %}
-
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-# Install docker swarm
-- description: Configure docker service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install docker swarm on master node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Send grains to the swarm slave nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Update mine
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Refresh modules
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Rerun swarm on slaves for proper token population
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Configure slave nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: List registered Docker swarm nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install keepalived on mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'mon*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check the VIP on mon nodes
-  cmd: |
-    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
-    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-# Install slv2 infra
-# Install MongoDB for alerta
-- description: Install Mongo if target matches
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-# Create MongoDB cluster
-- description: Create Mongo cluster if target matches
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 20}
-  skip_fail: false
-
-- description: Install telegraf
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install elasticsearch server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install kibana server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install elasticsearch client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Install kibana client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check influxdb
-  cmd: |
-    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
-    if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-# Install Prometheus LTS (optional, if set in the model)
-- description: Install Prometheus LTS (optional, if set in the model)
-  cmd: |
-    PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
-    if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-# Install service for the log collection
-- description: Configure fluentd
-  cmd: |
-    FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "Fluentd service presence: ${FLUENTD_SERVICE}";
-    if [[ "$FLUENTD_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
-    else
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-# Install heka ceilometer collector
-- description: Install heka ceilometer collector if it exists
-  cmd: |
-    CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "Ceilometer service presence: ${CEILO}";
-    if [[ "$CEILO" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Sync modules
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Update mine
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 15}
-  skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-# Launch containers
-- description: Launch prometheus containers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Check docker ps
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-###
-# From pipeline-library:
-# if (!common.checkContains('STACK_INSTALL', 'k8s')) {
-#        salt.enforceState(master, 'I@docker:swarm and I@prometheus:server', 'heka.remote_collector', true, false)
-#    }
-
-#- description: Configure Remote Collector in Docker Swarm for Openstack deployments
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-###
-
-
-- description: Configure Grafana dashboards and datasources
-  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Run salt minion to create cert files
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-
-{{  SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{  SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
\ No newline at end of file
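
Several of the sl.yaml steps above share a guard pattern: first probe whether any minion matches a pillar (via match.pillar, or a test.ping presence check captured into a shell variable) and only then apply the state, so optional services such as influxdb, fluentd or the ceilometer collector cannot fail the run when absent from the model. A minimal sketch of that pattern with salt's Python API, under the same assumptions as the earlier sketch:

    import salt.client

    local = salt.client.LocalClient()
    # Apply influxdb only if some minion actually carries the pillar target.
    if local.cmd('I@influxdb:server', 'test.ping', tgt_type='compound'):
        local.cmd('I@influxdb:server', 'state.sls', ['influxdb'],
                  tgt_type='compound')
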
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
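
Unlike the Jinja templates, this meta-data snippet uses single-brace placeholders ({hostname}), which suggests Python str.format()-style substitution by the framework rather than Jinja rendering; that is an assumption here. A minimal sketch with an illustrative hostname:

    meta = (
        'instance-id: iid-local1\n'
        'hostname: {hostname}\n'
        'local-hostname: {hostname}\n'
    )
    # Hostname value is purely illustrative:
    print(meta.format(hostname='cfg01.virtual-mcp11-k8s-contrail.local'))
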
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 4f140a0..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,69 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifdown ens3
-   - sudo ip r d default || true  # remove existing default route to get it from dhcp
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-   - sudo ifdown ens4
-   - sudo ifdown ens5
-   - sudo ifup ens4
-   - sudo ifup ens5
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-          auto ens4
-          iface ens4 inet dhcp
-          auto ens5
-          iface ens5 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml
deleted file mode 100644
index 2a41ee3..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifup eth0
-   #- sudo route add default gw {gateway} {interface_name}
-   - sudo ifup eth1
-   - sudo ifup eth2
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto eth0
-          iface eth0 inet dhcp
-          auto eth1
-          iface eth1 inet dhcp
-          auto eth2
-          iface eth2 inet dhcp
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1604.yaml
deleted file mode 100644
index 5fc02ce..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-   - sudo ifup ens4
-   - sudo ifup ens5
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-          auto ens4
-          iface ens4 inet dhcp
-          auto ens5
-          iface ens5 inet dhcp
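
All three user-data files open with a YAML block-scalar indicator ('|'), so when underlay.yaml imports them and binds them to anchors, each cloud-config travels as one opaque string (embedded Jinja loop included) rather than as parsed YAML. A minimal sketch of that behavior:

    import yaml

    snippet = (
        '| # All the data below will be stored as a string object\n'
        '  ssh_pwauth: True\n'
        '  disable_root: false\n'
    )
    data = yaml.safe_load(snippet)
    assert isinstance(data, str)  # one opaque string, not a dict
    print(data)
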
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml
deleted file mode 100644
index 33b68d7..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml
+++ /dev/null
@@ -1,581 +0,0 @@
-# This environment requires 50.5 GB of RAM and 270 GB of storage. Run with caution.
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp11-k8s-contrail/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp11-k8s-contrail/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp11-k8s-contrail/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml' as CLOUDINIT_USER_DATA_1404 with context %}
-
----
-aliases:
-# e1000 is not able to serve multicast traffic, so keepalived does not
-# work correctly with it. In any contrail model, the virtio interface
-# model should be used.
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_1404 {{ CLOUDINIT_USER_DATA_1404 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp11-k8s-contrail') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_NTW01 = os_env('HOSTNAME_NTW01', 'ntw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_NTW02 = os_env('HOSTNAME_NTW02', 'ntw02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_NTW03 = os_env('HOSTNAME_NTW03', 'ntw03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_VSRX01 = os_env('HOSTNAME_VSRX01', 'vsrx01.' + DOMAIN_NAME) %}
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'virtual-mcp11-k8s-contrail_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '172.16.10.0/24:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
-            default_{{ HOSTNAME_NTW01 }}: +110
-            default_{{ HOSTNAME_NTW02 }}: +111
-            default_{{ HOSTNAME_NTW03 }}: +112
-            default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_VSRX01 }}: +90
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      public-pool01:
-        net: {{ os_env('PUBLIC_ADDRESS_POOL01', '192.168.10.0/24:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
-            default_{{ HOSTNAME_NTW01 }}: +110
-            default_{{ HOSTNAME_NTW02 }}: +111
-            default_{{ HOSTNAME_NTW03 }}: +112
-            default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_VSRX01 }}: +90
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/24:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
-            default_{{ HOSTNAME_NTW01 }}: +110
-            default_{{ HOSTNAME_NTW02 }}: +111
-            default_{{ HOSTNAME_NTW03 }}: +112
-            default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_VSRX01 }}: +90
-          ip_ranges:
-            dhcp: [+10, -10]
-
-    groups:
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: True
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
-        network_pools:
-          private: private-pool01
-          public: public-pool01
-          admin: admin-pool01
-
-        l2_network_devices:
-          private:
-            address_pool: private-pool01
-            dhcp: true
-
-          public:
-            address_pool: public-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-          admin:
-            address_pool: admin-pool01
-            dhcp: true
-
-        group_volumes:
-         - name: cloudimage1604    # This name is used for the 'backing_store' option for node volumes.
-           # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2  (preferred)
-           # or
-           # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
-           format: qcow2
-
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fall back to IMAGE_PATH1604
-           format: qcow2
-
-         - name: cloudimage1404
-           source_image: !os_env IMAGE_PATH1404
-           format: qcow2
-
-         - name: vsrx_image
-           source_image: !os_env IMAGE_VSRX
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_cfg01
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: public
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: admin
-                  interface_model: *interface_model
-
-              network_config:
-                ens3:
-                  networks:
-                    - public
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - admin
-
-          - name: {{ HOSTNAME_CTL01 }}
-            role: k8s_controller
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 20
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: &interfaces
-                - label: ens3
-                  l2_network_device: public
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: admin
-                  interface_model: *interface_model
-              network_config: &network_config
-                ens3:
-                  networks:
-                    - public
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - admin
-
-          - name: {{ HOSTNAME_CTL02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 20
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CTL03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 20
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMP01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 20
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMP02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 20
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON01 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('MON_NODE_CPU', 1) }}
-              memory: {{ os_env('MON_NODE_MEMORY', 8192) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 20
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON02 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('MON_NODE_CPU', 1) }}
-              memory: {{ os_env('MON_NODE_MEMORY', 8192) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 20
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON03 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('MON_NODE_CPU', 1) }}
-              memory: {{ os_env('MON_NODE_MEMORY', 8192) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 20
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_NTW01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 20
-                  backing_store: cloudimage1404
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1404
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_NTW02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 20
-                  backing_store: cloudimage1404
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1404
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_NTW03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 20
-                  backing_store: cloudimage1404
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1404
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_PRX01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 1024
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 20
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_VSRX01 }}
-            role: vsrx
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 512
-              boot:
-                - hd
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 10
-                  backing_store: vsrx_image
-                  format: qcow2
-                  #- name: iso
-                  #- capacity: 1
-                  #- format: raw
-                  #- device: cdrom
-                  #- bus: ide
-                  #- cloudinit_user_data: !include juniper.conf
-
-              interfaces:
-                - label: ge-0/0/0
-                  l2_network_device: private
-                  interface_model: *interface_model
-                  mac_address: 52:54:00:4e:b4:36
-                - label: ge-0/0/1
-                  l2_network_device: public
-                  interface_model: *interface_model
-                  mac_address: 52:54:00:e1:44:9d
-                - label: ge-0/0/2
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                  mac_address: 52:54:00:72:08:77
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/core.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/core.yaml
deleted file mode 100644
index 2356475..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/core.yaml
+++ /dev/null
@@ -1,118 +0,0 @@
-{% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
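Note: the glusterfs setup and galera slave steps above pass `-b 1`; a sketch of the batching flag, assuming standard salt semantics:

    # -b 1 (batch size 1): apply the state to one matching minion at a time,
    # so cluster members bootstrap serially instead of racing in parallel:
    salt -C 'I@galera:slave' state.sls galera -b 1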
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml
deleted file mode 100644
index afe3b21..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml
+++ /dev/null
@@ -1,291 +0,0 @@
-{% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@glance:server' state.sls glance -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 15}
-  skip_fail: false
-
-- description: Restart apache due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-- description: Check apache status due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glance:server' state.sls glusterfs.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Populate keystone services/tenants/admins
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:client' state.sls keystone.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check keystone service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check glance image-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
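Note: the checks above source two different credential files; as used in these templates, /root/keystonercv3 carries v3 credentials for the unified client while /root/keystonerc serves the legacy per-service clients (an observation from these steps, not a general rule):

    . /root/keystonercv3; openstack service list   # unified CLI, v3 credentials
    . /root/keystonerc; glance image-list          # legacy per-service CLI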
-
-- description: Install nova on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nova:controller' state.sls nova -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check nova service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install cinder
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@cinder:controller' state.sls cinder -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check cinder list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install neutron service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:server' state.sls neutron -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install neutron on gtw node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:gateway' state.sls neutron
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Check neutron agent-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@heat:server' state.sls heat -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 10}
-  skip_fail: false
-
-
-- description: Deploy horizon dashboard
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@horizon:server' state.sls horizon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Deploy nginx proxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check IP on computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
-    'ip a'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 10, delay: 30}
-  skip_fail: false
-
-
-# Upload cirros image
-
-- description: Upload cirros image on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Register image in glance
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create net04_external
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create subnet_external
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create net04
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron net-create net04 --provider:network_type gre'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create subnet_net04
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create router
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-create net04_router01 --ha False'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set gateway
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Add interface
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Allow all TCP
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Allow all ICMP
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
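Note: taken together, the neutron steps above produce this topology (sketch):

    # net04 (gre, {{ IPV4_NET_TENANT_PREFIX }}.0/24)
    #   -> net04__subnet -> net04_router01 -> gateway -> net04_ext (flat, {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24)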
-# Configure cinder-volume salt-call
-- description: Set disks 01
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 02
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 03
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 01
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 02
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 03
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
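Note: the "Set disks" steps drive fdisk non-interactively; a sketch of the piped answers, assuming fdisk's standard prompt order:

    # n        new partition
    # p        primary
    # (blank)  default partition number
    # (blank)  default first sector
    # (blank)  default last sector
    # w        write the table and exit

The subsequent pvcreate then turns the new /dev/vdb1 partition into an LVM physical volume for cinder.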
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/salt.yaml
deleted file mode 100644
index 27999e1..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/salt.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-{% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# For other salt model repository parameters, see shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-#
-#- description: Hack gtw node
-#  cmd: salt 'gtw*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-#
-#- description: Hack cmp01 node
-#  cmd: salt 'cmp01*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-#
-#- description: Hack cmp02 node
-#  cmd: salt 'cmp02*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
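Note: {hostname} is a plain-string placeholder, presumably substituted per node when the cloud-init ISO is built; rendered for ctl01 it would look like:

    instance-id: iid-local1
    hostname: ctl01.virtual-mcp11-ovs-dpdk.local
    local-hostname: ctl01.virtual-mcp11-ovs-dpdk.local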
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 600f6fb..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block SSH access while the node is being prepared
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifdown ens3
-   - sudo ip r d default || true  # remove existing default route to get it from dhcp
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
-   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--user-data1604.yaml
deleted file mode 100644
index 9852e2c..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,83 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block SSH access while the node is being prepared
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc hugepages
-
-   # Enable hugepages on the nodes
-   - echo 2048 > /proc/sys/vm/nr_hugepages
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay.yaml
deleted file mode 100644
index c396bcd..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay.yaml
+++ /dev/null
@@ -1,432 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp11-ovs-dpdk/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp11-ovs-dpdk/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp11-ovs-dpdk/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
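Note: the aliases above define YAML anchors that the node definitions below reuse; a minimal sketch of the pattern:

    aliases:
     - &interface_model virtio              # define once
    ...
    interfaces:
      - label: ens3
        interface_model: *interface_model   # reference wherever needed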
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp11-ovs-dpdk') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'virtual-mcp11-ovs-dpdk_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +90
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+10, -10]
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+10, -10]
-
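Note: the +N/-N values are offsets into each generated subnet, assuming the usual devops address-pool semantics; e.g. for a rendered 10.90.0.0/24:

    # gateway: +1             -> 10.90.0.1
    # default_<cfg01>: +100   -> 10.90.0.100
    # dhcp: [+10, -10]        -> 10.90.0.10 up to 10 addresses before the end of the subnet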
-
-    groups:
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: True
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
-        network_pools:
-          admin: admin-pool01
-          private: private-pool01
-          tenant: tenant-pool01
-          external: external-pool01
-
-        l2_network_devices:
-          private:
-            address_pool: private-pool01
-            dhcp: false
-            forward:
-              mode: route
-
-          admin:
-            address_pool: admin-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-          tenant:
-            address_pool: tenant-pool01
-            dhcp: false
-
-          external:
-            address_pool: external-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-
-        group_volumes:
-         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
-           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           format: qcow2
-
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_cfg01
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: &interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config: &network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CTL03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_PRX01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-
-          - name: {{ HOSTNAME_CMP01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 12
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              numa:
-              - cpus: 0,1,2,3,4,5
-                memory: 4096
-              - cpus: 6,7,8,9,10,11
-                memory: 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-
-              interfaces: &all_interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: tenant
-                  interface_model: *interface_model
-                - label: ens6
-                  l2_network_device: external
-                  interface_model: *interface_model
-              network_config: &all_network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - tenant
-                ens6:
-                  networks:
-                    - external
-
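Note: the compute nodes declare an explicit two-cell NUMA layout, presumably to exercise OVS-DPDK's NUMA awareness; the cells split the 12 vCPUs and 8 GB evenly, and their totals are expected to match the node's vcpu/memory:

    numa:
    - cpus: 0,1,2,3,4,5      # cell 0: 6 vCPUs, 4 GB
      memory: 4096
    - cpus: 6,7,8,9,10,11    # cell 1: 6 vCPUs, 4 GB
      memory: 4096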
-          - name: {{ HOSTNAME_CMP02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 12
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              numa:
-              - cpus: 0,1,2,3,4,5
-                memory: 4096
-              - cpus: 6,7,8,9,10,11
-                memory: 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_GTW01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/core.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/core.yaml
deleted file mode 100644
index 7d13f72..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/core.yaml
+++ /dev/null
@@ -1,118 +0,0 @@
-{% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/openstack.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/openstack.yaml
deleted file mode 100644
index 66d7614..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/openstack.yaml
+++ /dev/null
@@ -1,256 +0,0 @@
-################### Install OpenStack control ##########################
-
-# // Install horizon dashboard
-# salt.enforceState(saltMaster, 'I@horizon:server', 'horizon', true)
-# salt.enforceState(saltMaster, 'I@nginx:server', 'nginx', true)
-
-- description: Install Horizon
-  do: enforceState
-  target: I@horizon:server
-  state: horizon
-
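Note: this template uses a declarative do/target/state step form instead of raw salt commands; judging by the Groovy comments, each enforceState step corresponds roughly to the CLI form used by the other templates (assumed equivalence):

    # do: enforceState, target: I@horizon:server, state: horizon
    #   ~  salt -C 'I@horizon:server' state.sls horizon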
-- description: Update certs on nginx servers
-  do: enforceState
-  target: I@nginx:server
-  state: salt.minion.cert
-
-- description: Install nginx
-  do: enforceState
-  target: I@nginx:server
-  state: nginx
-
-# // setup keystone service
-# //runSaltProcessStep(saltMaster, 'I@keystone:server', 'state.sls', ['keystone.server'], 1)
-# salt.enforceState(saltMaster, 'I@keystone:server and *01*', 'keystone.server', true)
-# salt.enforceState(saltMaster, 'I@keystone:server', 'keystone.server', true)
-# // populate keystone services/tenants/roles/users
-
-- description: Install Keystone on 01
-  do: enforceState
-  target: I@keystone:server and *01*
-  state: keystone.server
-  retry: {count: 3, delay: 5}
-
-- description: Install Keystone
-  do: enforceState
-  target: I@keystone:server
-  state: keystone.server
-  retry: {count: 3, delay: 5}
-
-# // keystone:client must be called locally
-# //salt.runSaltProcessStep(saltMaster, 'I@keystone:client', 'cmd.run', ['salt-call state.sls keystone.client'], null, true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'service.restart', ['apache2'])
-# salt.enforceState(saltMaster, 'I@keystone:client', 'keystone.client', true)
-# salt.enforceState(saltMaster, 'I@keystone:client', 'keystone.client', true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; keystone service-list'], null, true)
-
-# - description: Install Keystone client
-#   do: runState
-#   target: I@keystone:client
-#   state: cmd.run
-#   args: ['salt-call state.sls keystone.client']
-
-- description: Restart apache on Keystone servers
-  do: runState
-  target: I@keystone:server
-  state: service.restart
-  args: ['apache2']
-
-- description: Install Keystone Client
-  do: enforceState
-  target: I@keystone:client
-  state: keystone.client
-
-- description: Install Keystone Client (applied twice, as in the reference pipeline comments above)
-  do: enforceState
-  target: I@keystone:client
-  state: keystone.client
-
-- description: Show Keystone config
-  do: runState
-  target: I@keystone:server
-  state: cmd.run
-  args: ['. /root/keystonerc; keystone service-list']
-
-
-# // Install glance and ensure glusterfs clusters
-# //runSaltProcessStep(saltMaster, 'I@glance:server', 'state.sls', ['glance.server'], 1)
-# salt.enforceState(saltMaster, 'I@glance:server and *01*', 'glance.server', true)
-# salt.enforceState(saltMaster, 'I@glance:server', 'glance.server', true)
-# salt.enforceState(saltMaster, 'I@glance:server', 'glusterfs.client', true)
-
-
-- description: Install glance on 01
-  do: enforceState
-  target: I@glance:server and *01*
-  state: glance.server
-
-- description: Install glance
-  do: enforceState
-  target: I@glance:server
-  state: glance.server
-
-- description: Install gluster client on glance servers
-  do: enforceState
-  target: I@glance:server
-  state: glusterfs.client
-
-# // Update fernet tokens before doing request on keystone server
-# salt.enforceState(saltMaster, 'I@keystone:server', 'keystone.server', true)
-
-- description: Update fernet tokens
-  do: enforceState
-  target: I@keystone:server
-  state: keystone.server
-
-# // Check glance service
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; glance image-list'], null, true)
-
-- description: Show glance images via keystone node
-  do: runState
-  target: I@keystone:server
-  state: cmd.run
-  args: ['. /root/keystonerc; glance image-list']
-
-# // Install and check nova service
-# //runSaltProcessStep(saltMaster, 'I@nova:controller', 'state.sls', ['nova'], 1)
-# salt.enforceState(saltMaster, 'I@nova:controller and *01*', 'nova.controller', true)
-# salt.enforceState(saltMaster, 'I@nova:controller', 'nova.controller', true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova service-list'], null, true)
-
-- description: Install nova on controllers on 01
-  do: enforceState
-  target: I@nova:controller and *01*
-  state: nova.controller
-
-- description: Install nova on controllers
-  do: enforceState
-  target: I@nova:controller
-  state: nova.controller
-
-- description: Show nova services via keystone node
-  do: runState
-  target: I@keystone:server
-  state: cmd.run
-  args: ['. /root/keystonerc; nova service-list']
-
-
-
-# // Install and check cinder service
-# //runSaltProcessStep(saltMaster, 'I@cinder:controller', 'state.sls', ['cinder'], 1)
-# salt.enforceState(saltMaster, 'I@cinder:controller and *01*', 'cinder', true)
-# salt.enforceState(saltMaster, 'I@cinder:controller', 'cinder', true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; cinder list'], null, true)
-
-
-- description: Install cinder on controllers on 01
-  do: enforceState
-  target: I@cinder:controller and *01*
-  state: cinder
-
-- description: Install cinder on controllers
-  do: enforceState
-  target: I@cinder:controller
-  state: cinder
-
-- description: Show cinder list via keystone node
-  do: runState
-  target: I@keystone:server
-  state: cmd.run
-  args: ['. /root/keystonerc; cinder list']
-
-
-# // Install neutron service
-# //runSaltProcessStep(saltMaster, 'I@neutron:server', 'state.sls', ['neutron'], 1)
-
-# salt.enforceState(saltMaster, 'I@neutron:server and *01*', 'neutron.server', true)
-# salt.enforceState(saltMaster, 'I@neutron:server', 'neutron.server', true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron agent-list'], null, true)
-
-- description: Install neutron on controllers on 01
-  do: enforceState
-  target: I@neutron:server and *01*
-  state: neutron.server
-
-- description: Install neutron on controllers
-  do: enforceState
-  target: I@neutron:server
-  state: neutron.server
-
-- description: Show neutron agent list via keystone node
-  do: runState
-  target: I@keystone:server
-  state: cmd.run
-  args: ['. /root/keystonerc; neutron agent-list']
-
-# // Install heat service
-# //runSaltProcessStep(saltMaster, 'I@heat:server', 'state.sls', ['heat'], 1)
-# salt.enforceState(saltMaster, 'I@heat:server and *01*', 'heat', true)
-# salt.enforceState(saltMaster, 'I@heat:server', 'heat', true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; heat resource-type-list'], null, true)
-
-- description: Install heat on controllers on 01
-  do: enforceState
-  target: I@heat:server and *01*
-  state: heat
-
-- description: Install heat on controllers
-  do: enforceState
-  target: I@heat:server
-  state: heat
-
-- description: Show heat resource type list via keystone node
-  do: runState
-  target: I@keystone:server
-  state: cmd.run
-  args: ['. /root/keystonerc; heat resource-type-list']
-
-# // Restart nova api
-# salt.runSaltProcessStep(saltMaster, 'I@nova:controller', 'service.restart', ['nova-api'])
-
-- description: Restart nova-api
-  do: runState
-  target: I@nova:controller
-  state: service.restart
-  args: ['nova-api']
-
-################### Install OpenStack network ##########################
-
-# // Apply gateway
-# salt.runSaltProcessStep(saltMaster, 'I@neutron:gateway', 'state.apply', [], null, true)
-
-- description: Apply gateway
-  do: runState
-  target: I@neutron:gateway
-  state: state.apply
-
-# // Print information
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron net-list'], null, true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova net-list'], null, true)
-
-- description: Show neutron networks via keystone node
-  do: runState
-  target: I@keystone:server
-  state: cmd.run
-  args: ['. /root/keystonerc; neutron net-list']
-
-- description: Show nova networks via keystone node
-  do: runState
-  target: I@keystone:server
-  state: cmd.run
-  args: ['. /root/keystonerc; nova net-list']
-
-
-################### Install OpenStack compute ##########################
-
-#  //orchestrate.installOpenstackMkCompute(saltMaster, physical)
-#  // Configure compute nodes
-#  retry(2) {
-#      salt.runSaltProcessStep(saltMaster, 'I@nova:compute', 'state.apply', [], null, true)
-#  }
-
-- description: Install Nova compute
-  do: runState
-  target: I@nova:compute
-  state: state.apply
-  retry: {count: 2, delay: 5}
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/salt.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/salt.yaml
deleted file mode 100644
index 46c02a0..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/salt.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-{% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp11-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-#- description: Hack gtw node
-#  cmd: salt 'gtw*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-#
-#- description: Hack cmp01 node
-#  cmd: salt 'cmp01*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-#
-#- description: Hack cmp02 node
-#  cmd: salt 'cmp02*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--user-data-cfg01.yaml
deleted file mode 100644
index ecd79db..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,83 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block SSH access while the node is being prepared
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
-   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--user-data1604.yaml
deleted file mode 100644
index 29229d1..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block SSH access while the node is being prepared
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
- 
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/underlay.yaml
deleted file mode 100644
index 40ea763..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay.yaml
+++ /dev/null
@@ -1,418 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp11-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp11-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp11-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp11-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'virtual-mcp11-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +90
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+10, -10]
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+10, -10]
-
-
-    groups:
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
-        network_pools:
-          admin: admin-pool01
-          private: private-pool01
-          tenant: tenant-pool01
-          external: external-pool01
-
-        l2_network_devices:
-          private:
-            address_pool: private-pool01
-            dhcp: false
-            forward:
-              mode: route
-
-          admin:
-            address_pool: admin-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-          tenant:
-            address_pool: tenant-pool01
-            dhcp: false
-
-          external:
-            address_pool: external-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-
-        group_volumes:
-         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
-           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img or
-                                             # http://apt.tcpcloud.eu/images/ubuntu-16-04-x64-201608231004.qcow2
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_cfg01
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: &interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config: &network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CTL03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_PRX01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-
-          - name: {{ HOSTNAME_CMP01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-
-              interfaces: &all_interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: tenant
-                  interface_model: *interface_model
-                - label: ens6
-                  l2_network_device: external
-                  interface_model: *interface_model
-              network_config: &all_network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - tenant
-                ens6:
-                  networks:
-                    - external
-
-          - name: {{ HOSTNAME_CMP02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_GTW01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/core.yaml b/tcp_tests/templates/virtual-mcp11-ovs/core.yaml
deleted file mode 100644
index 7daf069..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs/core.yaml
+++ /dev/null
@@ -1,125 +0,0 @@
-{% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: sync time
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
-    'service ntp stop; ntpd -gq;  service ntp start'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: true
-
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 20}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml
deleted file mode 100644
index f9dca1d..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml
+++ /dev/null
@@ -1,360 +0,0 @@
-{% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@glance:server' state.sls glance -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 15}
-  skip_fail: false
-
-- description: Restart apache due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-- description: Check apache status due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glance:server' state.sls glusterfs.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Populate keystone services/tenants/admins
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:client' state.sls keystone.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check keystone service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check glance image-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install nova on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nova:controller' state.sls nova -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check nova service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install cinder
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@cinder:controller' state.sls cinder -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check cinder list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install neutron service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:server' state.sls neutron -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install neutron on gtw node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:gateway' state.sls neutron
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Check neutron agent-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@heat:server' state.sls heat -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 10}
-  skip_fail: false
-
-
-- description: Deploy horizon dashboard
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@horizon:server' state.sls horizon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Deploy nginx proxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check IP on computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
-    'ip a'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 10, delay: 30}
-  skip_fail: false
-
-
-# Upload cirros image
-
-- description: Upload cirros image on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Register image in glance
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create net04_external
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create subnet_external
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create net04
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron net-create net04'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create subnet_net04
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create router
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-create net04_router01'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set gateway
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Add interface
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Allow all tcp
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Allow all icmp
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: sync time
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
-    'service ntp stop; ntpd -gq;  service ntp start'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: true
-
-# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 02
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 03
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 01
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 02
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 03
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: create volume_group
-  cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install cinder-volume
-  cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install crudini
-  cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary workaround - set enabled_backends value 01
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary workaround - set enabled_backends value 02
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary workaround - set enabled_backends value 03
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Restart cinder volume
-  cmd: |
-    salt -C 'I@cinder:controller' service.restart cinder-volume;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Install docker.io on gtw
-  cmd: salt-call cmd.run 'apt-get install docker.io -y'
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: create rc file on cfg
-  cmd: scp ctl01:/root/keystonercv3 /root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Copy rc file
-  cmd: scp /root/keystonercv3 gtw01:/root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp11-ovs/salt.yaml
deleted file mode 100644
index cfa0272..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs/salt.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-{% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp11-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp11-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-#- description: WR run linux state to fix hosts
-#  cmd: salt "cfg*" state.sls linux
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: true
-
-#- description: Hack gtw node
-#  cmd: salt 'gtw*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.94/24 dev ens4; ip addr flush dev ens4";
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-#
-#- description: Hack cmp01 node
-#  cmd: salt 'cmp01*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.95/24 dev ens4; ip addr flush dev ens4";
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-#
-#- description: Hack cmp02 node
-#  cmd: salt 'cmp02*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.96/24 dev ens4; ip addr flush dev ens4";
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-#
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp11-ovs/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-ovs/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 600f6fb..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block SSH access while the node is being prepared
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifdown ens3
-   - sudo ip r d default || true  # remove existing default route to get it from dhcp
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
-   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp11-ovs/underlay--user-data1604.yaml
deleted file mode 100644
index df91bee..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block SSH access while the node is being prepared
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
- 
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/underlay.yaml b/tcp_tests/templates/virtual-mcp11-ovs/underlay.yaml
deleted file mode 100644
index bd52ae0..0000000
--- a/tcp_tests/templates/virtual-mcp11-ovs/underlay.yaml
+++ /dev/null
@@ -1,421 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp11-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp11-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp11-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
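-# NOTE (illustrative, not from the original file): the '&name' entries above
-# are YAML anchors; the node definitions below reference them as '*name'
-# aliases (e.g. 'cloudinit_user_data: *cloudinit_user_data_1604'), so each
-# cloud-init payload is declared once and reused by every node.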
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp11-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'virtual-mcp11-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +90
-            default_{{ HOSTNAME_CTL01 }}: +91
-            default_{{ HOSTNAME_CTL02 }}: +92
-            default_{{ HOSTNAME_CTL03 }}: +93
-            default_{{ HOSTNAME_CMP01 }}: +95
-            default_{{ HOSTNAME_CMP02 }}: +96
-            default_{{ HOSTNAME_GTW01 }}: +94
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+90, -10]
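-          # NOTE (illustrative, not from the original file): '+N'/'-N' are
-          # offsets from the start/end of the network allocated from this
-          # pool; e.g. if the pool yields 10.60.0.0/24, '+90' resolves to
-          # 10.60.0.90 and the dhcp range [+90, -10] spans
-          # 10.60.0.90..10.60.0.245.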
-
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +90
-            default_{{ HOSTNAME_CTL01 }}: +91
-            default_{{ HOSTNAME_CTL02 }}: +92
-            default_{{ HOSTNAME_CTL03 }}: +93
-            default_{{ HOSTNAME_CMP01 }}: +95
-            default_{{ HOSTNAME_CMP02 }}: +96
-            default_{{ HOSTNAME_GTW01 }}: +94
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+10, -10]
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+10, -10]
-
-
-    groups:
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: false
-            hpet: false
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
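-            # NOTE (illustrative, not from the original file): '!os_env NAME,
-            # default' is a custom YAML tag resolved by the devops loader; it
-            # mirrors the Jinja os_env() helper, so use_host_cpu takes the
-            # value of $DRIVER_USE_HOST_CPU, or 'true' when the variable is
-            # unset.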
-
-        network_pools:
-          admin: admin-pool01
-          private: private-pool01
-          tenant: tenant-pool01
-          external: external-pool01
-
-        l2_network_devices:
-          private:
-            address_pool: private-pool01
-            dhcp: false
-            forward:
-              mode: route
-
-          admin:
-            address_pool: admin-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-          tenant:
-            address_pool: tenant-pool01
-            dhcp: false
-
-          external:
-            address_pool: external-pool01
-            dhcp: true
-            forward:
-              mode: nat
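-          # NOTE (illustrative, not from the original file): 'forward: mode'
-          # maps to the libvirt <forward> element: 'nat' gives guests outbound
-          # access through the host, 'route' forwards without address
-          # translation, and omitting 'forward' (the tenant network above)
-          # leaves the network isolated/host-only.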
-
-
-        group_volumes:
-         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
-           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           format: qcow2
-
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2, falling back to IMAGE_PATH1604
-           format: qcow2
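-        # NOTE (illustrative, not from the original file): each node 'system'
-        # volume below points at one of these images via 'backing_store',
-        # i.e. a qcow2 copy-on-write overlay, roughly:
-        #   qemu-img create -f qcow2 -b cloudimage1604.qcow2 node-system.qcow2 150G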
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: iso  # The volume named 'iso' is used to store
-                             # the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_cfg01
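-                # NOTE (illustrative, not from the original file): the 'iso'
-                # cdrom volume is how cloud-init gets its configuration: the
-                # meta-data and user-data referenced above are packed into a
-                # small ISO attached as a cdrom, which cloud-init (NoCloud
-                # datasource) reads on first boot.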
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # The volume named 'iso' is used to store
-                             # the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: &interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config: &network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # The volume named 'iso' is used to store
-                             # the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
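-              # NOTE (illustrative, not from the original file): '*interfaces'
-              # and '*network_config' expand to the '&interfaces' and
-              # '&network_config' blocks defined under ctl01 above, giving all
-              # controllers an identical two-NIC (admin + private) layout.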
-
-          - name: {{ HOSTNAME_CTL03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # The volume named 'iso' is used to store
-                             # the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_PRX01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # The volume named 'iso' is used to store
-                             # the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-
-          - name: {{ HOSTNAME_CMP01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # The volume named 'iso' is used to store
-                             # the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-
-              interfaces: &all_interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: tenant
-                  interface_model: *interface_model
-                - label: ens6
-                  l2_network_device: external
-                  interface_model: *interface_model
-              network_config: &all_network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - tenant
-                ens6:
-                  networks:
-                    - external
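-              # NOTE (illustrative, not from the original file): the compute
-              # and gateway nodes reuse this '&all_interfaces' /
-              # '&all_network_config' pair, so they get four NICs (admin,
-              # private, tenant, external) instead of the controllers' two;
-              # ens5/ens6 presumably carry tenant overlay and external traffic.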
-
-          - name: {{ HOSTNAME_CMP02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # The volume named 'iso' is used to store
-                             # the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_GTW01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # The volume named 'iso' is used to store
-                             # the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config