Merge "Remove old templates (mk24/physical mcp11)"
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/common-services.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/common-services.yaml
deleted file mode 100644
index fbf3a06..0000000
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/common-services.yaml
+++ /dev/null
@@ -1,118 +0,0 @@
-{% from 'mk24_lab_ovs_dvr_vlan_bm/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
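
Every step in the removed templates follows one schema: a shell `cmd` issued from the Salt master (`node_name`), a `retry` spec that re-runs the command on failure, and a `skip_fail` flag that decides whether a final failure aborts the deployment. Targets of the form `-C 'I@pillar:key'` are Salt compound matchers selecting minions by pillar data (`and *01*` additionally globs on the minion ID). A minimal sketch in the same schema; the `I@ntp:client` target and the `ntp` state are illustrative, not taken from this change:

    - description: Apply the 'ntp' state on every minion that has the ntp:client pillar
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@ntp:client' state.sls ntp
      node_name: {{ HOSTNAME_CFG01 }}  # all steps are issued from the Salt master
      retry: {count: 2, delay: 10}     # re-run up to 2 times, 10 seconds apart
      skip_fail: false                 # a persistent failure aborts the run
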
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/openstack.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/openstack.yaml
deleted file mode 100644
index 441b300..0000000
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/openstack.yaml
+++ /dev/null
@@ -1,169 +0,0 @@
-{% from 'mk24_lab_ovs_dvr_vlan_bm/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@glance:server' state.sls glance -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 15}
-  skip_fail: false
-
-- description: Restart apache due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-- description: Check apache status due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glance:server' state.sls glusterfs.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Populate keystone services/tenants/admins
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:client' state.sls keystone.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check keystone service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check glance image-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install nova on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nova:controller' state.sls nova -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check nova service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install cinder
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@cinder:controller' state.sls cinder -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check cinder list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install neutron service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:server' state.sls neutron -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install neutron on gtw node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:gateway' state.sls neutron
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Check neutron agent-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@heat:server' state.sls heat -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; heat resource-type-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Deploy horizon dashboard
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@horizon:server' state.sls horizon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Deploy nginx proxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check IP on computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
-    'ip a'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 10, delay: 30}
-  skip_fail: false
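
Several steps above pass `-b 1` to salt. That is batch mode: the state is applied to one matching minion at a time, so clustered services (Galera, Keystone fernet keys on shared storage, Glance) are reconfigured serially instead of restarting on all controllers at once. A sketch of the pattern, with an illustrative target:

    - description: Roll a state across matching nodes one at a time
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@haproxy:proxy' state.sls haproxy -b 1  # '-b 1' limits each batch to a single minion
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false
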
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/salt.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/salt.yaml
deleted file mode 100644
index 3dbefb8..0000000
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/salt.yaml
+++ /dev/null
@@ -1,109 +0,0 @@
-{% from 'mk24_lab_ovs_dvr_vlan_bm/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'mk24_lab_ovs_dvr_vlan_bm/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'mk24_lab_ovs_dvr_vlan_bm/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/qa') %}
-# For other salt model repository parameters, see shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-########################################
-# Spin up Control Plane VMs on KVM nodes
-########################################
-
-- description: '*Workaround 1/2* for bug PROD-9576 to get bond0 connectivity *without* rebooting nodes'
-  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False cmd.run
-    "mkdir -p /tmp/PROD-9576; cd /tmp/PROD-9576; git clone https://gerrit.mcp.mirantis.net/salt-formulas/linux; cd linux;
-    git fetch https://gerrit.mcp.mirantis.net/salt-formulas/linux refs/changes/54/2354/16 && git checkout FETCH_HEAD;
-    cp -f linux/network/interface.sls /srv/salt/env/prd/linux/network/;
-    cp -f linux/map.jinja /srv/salt/env/prd/linux/;"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: '*Workaround 2/2* for bug PROD-9576 to get bond0 connectivity on cfg01 *without* a reboot'
-  cmd: cat /etc/network/interfaces | grep bond-slaves | awk '{print $2}' | xargs -I {} ifenslave bond0 {}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Refresh pillars for present baremetal nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Sync all salt resources for present baremetal nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Configure linux for present baremetal nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
-    cfg01*' state.sls linux
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-- description: Execute 'libvirt' states to create the necessary libvirt networks (ovs-dvr-vlan model specific)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Create VMs for control plane
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
-
-- description: '*Workaround* to wait until the control-plane VMs appear in the salt-key list (instead of a fixed sleep)'
-  cmd: |
-    salt-key -l acc| sort > /tmp/current_keys.txt &&
-    salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 20, delay: 30}
-  skip_fail: false
-
-#########################################
-# Configure all running salt minion nodes
-#########################################
-
-- description: Refresh pillars on all minions
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Sync all salt resources
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Show reclass-salt --top for the generated nodes
-  cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-#- description: Execute salt.minion.cert
-#  cmd: salt-call --no-color state.sls salt.minion.cert -l info;
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 5}
-#  skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
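
The salt-key workaround above replaces a fixed sleep with a poll: combined with `retry: {count: 20, delay: 30}`, the step re-runs for up to ten minutes until every VM name reported by virsh on the kvm hosts shows up among the accepted Salt keys. The same pipeline, restated with comments (logic unchanged):

    - description: Wait until every control-plane VM has an accepted salt-key
      cmd: |
        # snapshot the currently accepted minion keys
        salt-key -l acc | sort > /tmp/current_keys.txt &&
        # list the VM names on the kvm hosts and require each of them to be
        # present in the snapshot; fgrep exits non-zero on the first miss,
        # which fails the step and triggers the next retry
        salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm' | sort | xargs -I {} fgrep {} /tmp/current_keys.txt
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 20, delay: 30}  # poll for up to ~10 minutes
      skip_fail: false
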
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--meta-data.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data-cfg01.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data-cfg01.yaml
deleted file mode 100644
index e386337..0000000
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block access to SSH while the node is being prepared
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifup enp8s0f0
-   #- sudo route add default gw {gateway} {interface_name}
-   - sudo ifup enp8s0f1
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
-   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto enp8s0f0
-          iface enp8s0f0 inet dhcp
-          auto enp8s0f1
-          iface enp8s0f1 inet manual
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data1604.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data1604.yaml
deleted file mode 100644
index a426f10..0000000
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block access to SSH while the node is being prepared
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifup enp8s0f0
-   #- sudo route add default gw {gateway} {interface_name}
-   - sudo ifup enp8s0f1
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
- 
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto enp8s0f0
-          iface enp8s0f0 inet dhcp
-          auto enp8s0f1
-          iface enp8s0f1 inet dhcp
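
Both user-data files above share the same cloud-init gating pattern: `bootcmd` installs an iptables rule that drops SSH before provisioning starts, and the final `runcmd` entries remove it once the node is ready, so the test framework can never connect to a half-configured node. Reduced to its essentials (provisioning steps elided):

      bootcmd:
       # block SSH while the node is being prepared; cloud-init-per ensures
       # the rule is added only once
       - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
      runcmd:
       # ...provisioning steps...
       # node is ready, allow SSH access again
       - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
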
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay.yaml
deleted file mode 100644
index 6b3b0c5..0000000
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay.yaml
+++ /dev/null
@@ -1,366 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'mk24_lab_ovs_dvr_vlan_bm/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'mk24_lab_ovs_dvr_vlan_bm/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'mk24_lab_ovs_dvr_vlan_bm/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'mk24_lab_ovs_dvr_vlan_bm') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'mk24_lab_ovs_dvr_vlan_bm_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.18.173.96/27:27') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_KVM01 }}: 172.18.173.112
-            default_{{ HOSTNAME_KVM02 }}: 172.18.173.113
-            default_{{ HOSTNAME_KVM03 }}: 172.18.173.114
-            default_{{ HOSTNAME_CMP01 }}: 172.18.173.115
-            default_{{ HOSTNAME_CMP02 }}: 172.18.173.116
-            default_{{ HOSTNAME_GTW01 }}: 172.18.173.117
-            default_{{ HOSTNAME_CFG01 }}: 172.18.173.118
-
-    groups:
-      - name: default
-        driver:
-          name: devops_driver_ironic
-          params:
-            os_auth_token: fake-token
-            ironic_url: !os_env IRONIC_URL  # URL that will be used by fuel-devops
-                                            # to access the Ironic API
-            # Agent URL that is accessible from the deploying node when nodes
-            # are bootstrapped with PXE. Usually the PXE/provision network address is used.
-            agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
-            agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
-
-        network_pools:
-          admin: admin-pool01
-
-        l2_network_devices:
-          admin:
-            address_pool: admin-pool01
-
-
-        nodes:
-
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CFG01  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp8s0f1  # see 'interfaces' below.
-                                             # This interface is passed to 'user-data'
-                                             # to substitute the {interface_name} variable if it is used there.
-
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # The volume named 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_cfg01
-
-              interfaces:
-                - label: enp8s0f1
-                  l2_network_device: admin
-                  mac_address: !os_env PXE_MAC_ADDRESS_CFG01
-
-              network_config:
-                enp8s0f1:
-                  networks:
-                   - admin
-
-          - name: {{ HOSTNAME_KVM01 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_KVM01  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp8s0f1  # see 'interfaces' below.
-                                             # This interface is passed to 'user-data'
-                                             # to substitute the {interface_name} variable if it is used there.
-
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # The volume named 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: enp8s0f1
-                  l2_network_device: admin
-                  mac_address: !os_env PXE_MAC_ADDRESS_KVM01
-
-              network_config:
-                enp8s0f1:
-                  networks:
-                   - admin
-
-
-          - name: {{ HOSTNAME_KVM02 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_KVM02  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp8s0f1  # see 'interfaces' below.
-                                             # This interface is passed to 'user-data'
-                                             # to substitute the {interface_name} variable if it is used there.
-
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # The volume named 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: enp8s0f1
-                  l2_network_device: admin
-                  mac_address: !os_env PXE_MAC_ADDRESS_KVM02
-
-              network_config:
-                enp8s0f1:
-                  networks:
-                   - admin
-
-
-          - name: {{ HOSTNAME_KVM03 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_KVM03  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp8s0f1  # see 'interfaces' below.
-                                             # This interface is passed to 'user-data'
-                                             # to substitute the {interface_name} variable if it is used there.
-
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # The volume named 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: enp8s0f1
-                  l2_network_device: admin
-                  mac_address: !os_env PXE_MAC_ADDRESS_KVM03
-
-              network_config:
-                enp8s0f1:
-                  networks:
-                   - admin
-
-
-          - name: {{ HOSTNAME_CMP01 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP01  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp8s0f1  # see 'interfaces' below.
-                                             # This interface is passed to 'user-data'
-                                             # to substitute the {interface_name} variable if it is used there.
-
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # The volume named 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: enp8s0f1
-                  l2_network_device: admin
-                  mac_address: !os_env PXE_MAC_ADDRESS_CMP01
-
-              network_config:
-                enp8s0f1:
-                  networks:
-                   - admin
-
-
-          - name: {{ HOSTNAME_CMP02 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP02  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp8s0f1  # see 'interfaces' below.
-                                             # This interface is passed to 'user-data'
-                                             # to substitute the {interface_name} variable if it is used there.
-
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # The volume named 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: enp8s0f1
-                  l2_network_device: admin
-                  mac_address: !os_env PXE_MAC_ADDRESS_CMP02
-
-              network_config:
-                enp8s0f1:
-                  networks:
-                   - admin
-
-
-          - name: {{ HOSTNAME_GTW01 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_GTW01  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp8s0f1  # see 'interfaces' below.
-                                             # This interface is passed to 'user-data'
-                                             # to substitute the {interface_name} variable if it is used there.
-
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # The volume named 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: enp8s0f1
-                  l2_network_device: admin
-                  mac_address: !os_env PXE_MAC_ADDRESS_GTW01
-
-              network_config:
-                enp8s0f1:
-                  networks:
-                   - admin
-
-
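
Two conventions recur throughout the removed underlay.yaml. `!os_env VAR, default` reads a value from an environment variable and falls back to the default when it is unset (e.g. `capacity: !os_env NODE_VOLUME_SIZE, 200`). In address pools, `ip_reserved` entries may be relative offsets into the subnet (`+1`) or fixed addresses keyed by hostname. A condensed sketch using the same syntax; the pool name and addresses are illustrative:

        address_pools:
          example-pool01:
            net: {{ os_env('EXAMPLE_ADDRESS_POOL01', '10.1.0.0/24:24') }}
            params:
              ip_reserved:
                gateway: +1            # first usable address of the allocated subnet
                l2_network_device: +1  # the admin bridge shares that offset
                default_{{ HOSTNAME_CFG01 }}: 10.1.0.10  # pin one host explicitly
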
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/openstack.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/openstack.yaml
index 745df96..4f2df46 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/openstack.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/openstack.yaml
@@ -1,4 +1,4 @@
-{% from 'physical_mcp11_ovs_dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'physical-mcp-ocata-offline-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
 # Install OpenStack control services
 
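
This one-line change is the only modification (rather than removal) in the merge: the template still imported HOSTNAME_CFG01 from the underlay of the deleted physical_mcp11_ovs_dpdk environment, and now points at the underlay in its own directory. For reference, the import form used across these templates (file names as in this repository; `with context` keeps the importing template's variables visible during evaluation):

    {% from 'physical-mcp-ocata-offline-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}

    - description: Example step addressed to this environment's Salt master
      cmd: salt '*' test.ping
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false
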
diff --git a/tcp_tests/templates/physical_mcp11_dvr/Readme.txt b/tcp_tests/templates/physical_mcp11_dvr/Readme.txt
deleted file mode 100644
index a3297a8..0000000
--- a/tcp_tests/templates/physical_mcp11_dvr/Readme.txt
+++ /dev/null
@@ -1 +0,0 @@
-PoC templates. Do not use!
\ No newline at end of file
diff --git a/tcp_tests/templates/physical_mcp11_dvr/common-services.yaml b/tcp_tests/templates/physical_mcp11_dvr/common-services.yaml
deleted file mode 100644
index cb176c1..0000000
--- a/tcp_tests/templates/physical_mcp11_dvr/common-services.yaml
+++ /dev/null
@@ -1,118 +0,0 @@
-{% from 'physical_mcp11_dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
diff --git a/tcp_tests/templates/physical_mcp11_dvr/openstack.yaml b/tcp_tests/templates/physical_mcp11_dvr/openstack.yaml
deleted file mode 100644
index e01ee0f..0000000
--- a/tcp_tests/templates/physical_mcp11_dvr/openstack.yaml
+++ /dev/null
@@ -1,169 +0,0 @@
-{% from 'physical_mcp11_dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@glance:server' state.sls glance -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 15}
-  skip_fail: false
-
-- description: Restart apache due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-- description: Check apache status due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glance:server' state.sls glusterfs.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Populate keystone services/tenants/admins
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:client' state.sls keystone.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check keystone service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check glance image-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install nova on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nova:controller' state.sls nova -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check nova service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install cinder
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@cinder:controller' state.sls cinder -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check cinder list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install neutron service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:server' state.sls neutron -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install neutron on gtw node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:gateway' state.sls neutron
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Check neutron agent-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@heat:server' state.sls heat -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; heat resource-type-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Deploy horizon dashboard
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@horizon:server' state.sls horizon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Deploy nginx proxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check IP on computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
-    'ip a'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 10, delay: 30}
-  skip_fail: false
diff --git a/tcp_tests/templates/physical_mcp11_dvr/salt.yaml b/tcp_tests/templates/physical_mcp11_dvr/salt.yaml
deleted file mode 100644
index 65e7458..0000000
--- a/tcp_tests/templates/physical_mcp11_dvr/salt.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-{% from 'physical_mcp11_dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'physical_mcp11_dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'physical_mcp11_dvr/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-baremetal-lab') %}
-# For other salt model repository parameters, see shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-
-- description: Configure linux for present baremetal nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
-    cfg01*' state.sls linux
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-- description: '*Workaround* for bug https://mirantis.jira.com/browse/PROD-9576 to get bond0 connectivity *without* rebooting nodes'
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
-    "cat /etc/network/interfaces | grep bond-slaves | awk '{print \$2}' | xargs -I {} ifenslave bond0 {}"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Execute 'libvirt' states to create the necessary libvirt networks (ovs-dvr-vlan model specific)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Create VMs for control plane
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
-
-
-
-- description: Refresh pillars on all minions
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Sync all salt resources
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Show reclass-salt --top for the generated nodes
-  cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Execute salt.minion.cert
-  cmd: salt-call --no-color state.sls salt.minion.cert -l info;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
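
Note the escaping difference between the two bond0 workarounds in this diff. Here the pipeline travels through `salt '*' cmd.run "..."`, so the awk field reference is written `\$2` to survive the outer double quotes and reach the remote shell as a literal `$2`. The mk24 variant earlier in this diff runs the same pipeline directly on cfg01 and needs no escaping:

    - description: Same pipeline, run locally on cfg01 (no extra escaping required)
      cmd: cat /etc/network/interfaces | grep bond-slaves | awk '{print $2}' | xargs -I {} ifenslave bond0 {}
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false
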
diff --git a/tcp_tests/templates/physical_mcp11_dvr/underlay--meta-data.yaml b/tcp_tests/templates/physical_mcp11_dvr/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/physical_mcp11_dvr/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
diff --git a/tcp_tests/templates/physical_mcp11_dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/physical_mcp11_dvr/underlay--user-data-cfg01.yaml
deleted file mode 100644
index f64ad5e..0000000
--- a/tcp_tests/templates/physical_mcp11_dvr/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,87 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block access to SSH while the node is being prepared
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifup eth0
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
-   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto eth0
-          iface eth0 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/physical_mcp11_dvr/underlay--user-data1604.yaml b/tcp_tests/templates/physical_mcp11_dvr/underlay--user-data1604.yaml
deleted file mode 100644
index 43d9175..0000000
--- a/tcp_tests/templates/physical_mcp11_dvr/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,78 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifup eth0
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
- 
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto eth0
-          iface eth0 inet dhcp
diff --git a/tcp_tests/templates/physical_mcp11_dvr/underlay.yaml b/tcp_tests/templates/physical_mcp11_dvr/underlay.yaml
deleted file mode 100644
index dbd5f35..0000000
--- a/tcp_tests/templates/physical_mcp11_dvr/underlay.yaml
+++ /dev/null
@@ -1,458 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'physical_mcp11_dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'physical_mcp11_dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'physical_mcp11_dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_dvr') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'physical_mcp11_dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.164.0/26:26') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
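-            # NOTE (editor's assumption): '+1' reserves the address at offset 1 from the
-            # pool's network address for the gateway and the L2 network device; the nodes
-            # below get explicitly pinned IPs.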
-            default_{{ HOSTNAME_CFG01 }}: 172.16.164.10
-            default_{{ HOSTNAME_KVM01 }}: 172.16.164.6
-            default_{{ HOSTNAME_KVM02 }}: 172.16.164.7
-            default_{{ HOSTNAME_KVM03 }}: 172.16.164.8
-            default_{{ HOSTNAME_CMP001 }}: 172.16.164.2
-            default_{{ HOSTNAME_CMP002 }}: 172.16.164.3
-            default_{{ HOSTNAME_GTW01 }}: 172.16.164.61
-            default_{{ HOSTNAME_GTW02 }}: 172.16.164.5
-
-    groups:
-      - name: default
-        driver:
-          name: devops_driver_ironic
-          params:
-            os_auth_token: fake-token
-            ironic_url: !os_env IRONIC_URL  # URL that fuel-devops will use
-                                            # to access the Ironic API
-            # Agent URL that is accessible from the deploying node when nodes
-            # are bootstrapped with PXE. Usually the PXE/provision network address is used.
-            agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
-            agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
-
-        network_pools:
-          admin: admin-pool01
-
-        l2_network_devices:
-          admin:
-            address_pool: admin-pool01
-
-
-        nodes:
-
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CFG01  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_cfg01
-
-              interfaces:
-                - label: eth0
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
-                - label: eth1
-                  l2_network_device: admin
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
-
-              network_config:
-                eth0:
-                  networks:
-                   - infra
-                eth1:
-                  networks:
-                   - admin
-
-          - name: {{ HOSTNAME_KVM01 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_KVM01  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: eth0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
-                - label: eth1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
-                # there is no eth2 interface on the node
-                #- label: eth2
-                #  mac_address: !os_env ETH2_MAC_ADDRESS_KVM01
-
-              network_config:
-                eth0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - eth1
-                   #- eth2
-
-          - name: {{ HOSTNAME_KVM02 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_KVM02  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: eth0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
-                - label: eth1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
-                - label: eth2
-                  mac_address: !os_env ETH2_MAC_ADDRESS_KVM02
-
-              network_config:
-                eth0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - eth1
-                   - eth2
-
-          - name: {{ HOSTNAME_KVM03 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_KVM03  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: eth0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
-                - label: eth1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
-                - label: eth2
-                  mac_address: !os_env ETH2_MAC_ADDRESS_KVM03
-
-              network_config:
-                eth0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - eth1
-                   - eth2
-
-
-          - name: {{ HOSTNAME_CMP001 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP001  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: eth0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
-                - label: eth1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
-                - label: eth2
-                  mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
-
-              network_config:
-                eth0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - eth1
-                   - eth2
-
-
-          - name: {{ HOSTNAME_CMP002 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP002  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: eth0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
-                - label: eth1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
-                - label: eth2
-                  mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
-
-              network_config:
-                eth0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - eth1
-                   - eth2
-
-
-          - name: {{ HOSTNAME_GTW01 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_GTW01  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: eth0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_GTW01
-                - label: eth1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_GTW01
-                - label: eth2
-                  mac_address: !os_env ETH2_MAC_ADDRESS_GTW01
-
-              network_config:
-                eth0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - eth1
-                   - eth2
-
-          - name: {{ HOSTNAME_GTW02 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_GTW02  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: eth0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_GTW02
-                - label: eth1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_GTW02
-                # there is no eth2 interface on the node
-                #- label: eth2
-                #  mac_address: !os_env ETH2_MAC_ADDRESS_GTW02
-
-              network_config:
-                eth0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - eth1
-                   #- eth2
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/Readme.txt b/tcp_tests/templates/physical_mcp11_ovs_dpdk/Readme.txt
deleted file mode 100644
index a3297a8..0000000
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/Readme.txt
+++ /dev/null
@@ -1 +0,0 @@
-PoC templates. Do not use!
\ No newline at end of file
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/common-services.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/common-services.yaml
deleted file mode 100644
index 0c30920..0000000
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/common-services.yaml
+++ /dev/null
@@ -1,131 +0,0 @@
-{% from 'physical_mcp11_ovs_dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-- description: Refresh grains
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls salt.minion.grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
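-  # A healthy cluster reports wsrep_cluster_size equal to the number of Galera nodes
-  # and lists every member in wsrep_incoming_addresses.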
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Deploy nginx proxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/openstack.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/openstack.yaml
deleted file mode 100644
index 83674b2..0000000
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/openstack.yaml
+++ /dev/null
@@ -1,161 +0,0 @@
-{% from 'physical_mcp11_ovs_dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@glance:server' state.sls glance -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 15}
-  skip_fail: false
-
-- description: Restart apache due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-- description: Check apache status due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glance:server' state.sls glusterfs.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Populate keystone services/tenants/admins
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:client' state.sls keystone.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check keystone service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check glance image-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install nova on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nova:controller' state.sls nova -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check nova service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install cinder
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@cinder:controller' state.sls cinder -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check cinder list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install neutron service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:server' state.sls neutron -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install neutron on gtw node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:gateway' state.sls neutron
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Check neutron agent-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@heat:server' state.sls heat -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Deploy horizon dashboard
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@horizon:server' state.sls horizon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-# Install compute node
-
-- description: Apply formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check IP on computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
-    'ip a'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 10, delay: 30}
-  skip_fail: false
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/salt.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/salt.yaml
deleted file mode 100644
index 1a14b23..0000000
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/salt.yaml
+++ /dev/null
@@ -1,136 +0,0 @@
-{% from 'physical_mcp11_ovs_dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'physical_mcp11_ovs_dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'physical_mcp11_ovs_dpdk/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-baremetal-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-# Environment model name stored in https://github.com/Mirantis/tcp-qa/tree/master/tcp_tests/environments
-{% set ENVIRONMENT_MODEL_NAME = os_env('ENVIRONMENT_MODEL_NAME','lab03_ovs_dpdk') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch"') }}
-
-- description: "[EXPERIMENTAL] Remove linux.network.interface object from the cluster/system models and use fixed 'environment' model instead"
-  cmd: |
-    apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
-    [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
-    . /root/venv-reclass-tools/bin/activate;
-    pip install git+https://github.com/dis-xcom/reclass-tools;
-    reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/cluster/;
-    reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/system/;
-    reclass-tools del-key parameters.linux.network.interface /usr/share/salt-formulas/reclass/;
-    git clone https://github.com/Mirantis/tcp-qa /tmp/tcp-qa;
-    ln -s /tmp/tcp-qa/tcp_tests/environment/ /srv/salt/reclass/classes;
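-    # Append the environment class to the cfg01 node definition only if it is missing,
-    # so re-running this step stays idempotent: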
-    if ! reclass-tools get-key 'classes' /srv/salt/reclass/nodes/{{ HOSTNAME_CFG01 }}.yml | grep -q "environment.{{ ENVIRONMENT_MODEL_NAME }}$"; then
-      reclass-tools add-key 'classes' 'environment.{{ ENVIRONMENT_MODEL_NAME }}' /srv/salt/reclass/nodes/{{ HOSTNAME_CFG01 }}.yml --merge ;
-    fi;
-
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-########################################
-# Spin up Control Plane VMs on KVM nodes
-########################################
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-- description: Refresh pillars for present baremetal nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: '*Workaround* enable hugepages on cmp* nodes for OVS setup in linux formula'
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
-      'sudo apt-get install -y hugepages; sudo echo 2048 > /proc/sys/vm/nr_hugepages'
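-  # NOTE: the redirect into /proc/sys/vm/nr_hugepages works only because cmd.run already
-  # executes as root; 'sudo echo' on its own would not elevate the redirection.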
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Sync all salt resources for present baremetal nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-#- description: '*Workaround* Avoid reboot when IP addresses are doubled on interfaces and bridges at the same time. For test environments only!'
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'kvm*' cmd.run
-#    "salt-call state.sls linux.network.interface && ls -1 /var/run/ | grep dhclient | awk -F'.' '{print \$2}' | xargs -I {} ifconfig {} 0.0.0.0"
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 3, delay: 5}
-#  skip_fail: false
-
-- description: Configure linux for present baremetal nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
-    cfg01*' state.sls linux
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-- description: ovs-dvr-vlan model specific: execute 'libvirt' states to create the necessary libvirt networks
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Create VMs for control plane
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
-
-- description: '*Workaround*: wait for the control-plane VMs to appear in salt-key (instead of a sleep)'
-  cmd: |
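-    # Compare the accepted salt keys against the VM names reported by 'virsh list --name'
-    # on the kvm nodes; the step retries (20 x 30s) until every control-plane VM key is present.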
-    salt-key -l acc | sort > /tmp/current_keys.txt &&
-    salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm' | sort | xargs -I {} fgrep {} /tmp/current_keys.txt
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 20, delay: 30}
-  skip_fail: false
-
-#########################################
-# Configure all running salt minion nodes
-#########################################
-
-- description: Refresh pillars on all minions
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Sync all salt resources
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Show reclass-salt --top for the generated nodes
-  cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Execute salt.minion on the config node to generate certificates
-  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls salt.minion;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--meta-data.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-cfg01.yaml
deleted file mode 100644
index ee878a2..0000000
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,92 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   - sudo ifdown eth0
-   - sudo ip r d default || true  # remove existing default route to get it from dhcp
-   - sudo ifup eth0
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
-   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
-
-   # Use sshuttle to allow SSH access to the model-related control network 10.167.4.0/24 on baremetal/VM nodes from cfg01
-   - sshuttle -r {{ ETH0_IP_ADDRESS_KVM01 }} 10.167.4.0/24 -D
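-   # ('-r' sets the remote SSH host to tunnel through; '-D' daemonizes sshuttle so the
-   # tunnel outlives this runcmd step)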
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto eth0
-          iface eth0 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-hwe.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-hwe.yaml
deleted file mode 100644
index a8e8250..0000000
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-hwe.yaml
+++ /dev/null
@@ -1,116 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   #- sudo ifup eth0
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
- 
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #- echo "Allow SSH access ..."
-   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   - apt-get install {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }} -y
-   - reboot
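-   # NOTE: re-enabling SSH is presumably unnecessary here: the node reboots into the HWE
-   # kernel, and the iptables DROP rule from bootcmd does not persist across the reboot.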
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          # The loopback network interface
-          auto lo
-          iface lo inet loopback
-
-          auto {interface_name}
-          iface {interface_name} inet dhcp
-
-   - path: /etc/udev/rules.d/70-persistent-net.rules
-     owner: root:root
-     permissions: '0644'
-     content: |
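-       # (editor's assumption) the doubled braces are escaped once by Jinja and once by
-       # Python str.format, rendering a literal udev MAC-address match per interface.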
-       # kvm01
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_KVM01') }}", NAME="enp2s0f0"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_KVM01') }}", NAME="enp2s0f1"
-       # kvm02
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_KVM02') }}", NAME="enp2s0f0"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_KVM02') }}", NAME="enp2s0f1"
-       # kvm03
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_KVM03') }}", NAME="eno1"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_KVM03') }}", NAME="eno2"
-       # cmp001
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_CMP001') }}", NAME="enp3s0f0"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_CMP001') }}", NAME="enp3s0f1"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH2_MAC_ADDRESS_CMP001') }}", NAME="enp5s0f0"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH3_MAC_ADDRESS_CMP001') }}", NAME="enp5s0f1"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH4_MAC_ADDRESS_CMP001') }}", NAME="enp5s0f2"
-       # cmp002
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_CMP002') }}", NAME="eno1"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_CMP002') }}", NAME="eth0"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH2_MAC_ADDRESS_CMP002') }}", NAME="eth3"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH3_MAC_ADDRESS_CMP002') }}", NAME="eth2"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH4_MAC_ADDRESS_CMP002') }}", NAME="eth4"
-       # gtw01
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_GTW01') }}", NAME="enp3s0f0"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_GTW01') }}", NAME="enp3s0f1"
-       # gtw02
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_GTW02') }}", NAME="eno1"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_GTW02') }}", NAME="eno2"
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data.yaml
deleted file mode 100644
index e555d3c..0000000
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data.yaml
+++ /dev/null
@@ -1,114 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Prepare network connection
-   #- sudo ifup eth0
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
- 
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          # The loopback network interface
-          auto lo
-          iface lo inet loopback
-
-          auto {interface_name}
-          iface {interface_name} inet dhcp
-
-   - path: /etc/udev/rules.d/70-persistent-net.rules
-     owner: root:root
-     permissions: '0644'
-     content: |
-       # kvm01
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_KVM01') }}", NAME="enp2s0f0"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_KVM01') }}", NAME="enp2s0f1"
-       # kvm02
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_KVM02') }}", NAME="enp2s0f0"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_KVM02') }}", NAME="enp2s0f1"
-       # kvm03
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_KVM03') }}", NAME="eno1"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_KVM03') }}", NAME="eno2"
-       # cmp001
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_CMP001') }}", NAME="enp3s0f0"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_CMP001') }}", NAME="enp3s0f1"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH2_MAC_ADDRESS_CMP001') }}", NAME="enp5s0f0"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH3_MAC_ADDRESS_CMP001') }}", NAME="enp5s0f1"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH4_MAC_ADDRESS_CMP001') }}", NAME="enp5s0f2"
-       # cmp002
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_CMP002') }}", NAME="eno1"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_CMP002') }}", NAME="eth0"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH2_MAC_ADDRESS_CMP002') }}", NAME="eth3"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH3_MAC_ADDRESS_CMP002') }}", NAME="eth2"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH4_MAC_ADDRESS_CMP002') }}", NAME="eth4"
-       # gtw01
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_GTW01') }}", NAME="enp3s0f0"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_GTW01') }}", NAME="enp3s0f1"
-       # gtw02
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_GTW02') }}", NAME="eno1"
-       SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_GTW02') }}", NAME="eno2"
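
Note on the escaping in the udev rules above: these user-data templates appear to be rendered in two passes. Jinja2 runs first, turning ATTR{{ '{{address}}' }} into ATTR{{address}}; a later Python str.format() pass (the same mechanism that fills the single-brace {gateway} and {interface_name} placeholders in the file above) then collapses the doubled braces into the literal ATTR{address} that udev expects. A minimal sketch of that two-pass reading, with an illustrative MAC value and a stand-in os_env helper:

    # Two-stage rendering sketch: Jinja2 first, then str.format().
    # The os_env stand-in and the MAC value are illustrative assumptions.
    import jinja2

    raw = ('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", '
           "ATTR{{ '{{address}}' }}==\"{{ os_env('ETH0_MAC_ADDRESS_GTW02') }}\", "
           'NAME="eno1"')

    def os_env(name, default=''):
        # Stand-in for the real template helper, which reads os.environ.
        return {'ETH0_MAC_ADDRESS_GTW02': '0c:c4:7a:aa:bb:cc'}.get(name, default)

    stage1 = jinja2.Template(raw).render(os_env=os_env)
    # stage1: ... ATTR{{address}}=="0c:c4:7a:aa:bb:cc", NAME="eno1"
    stage2 = stage1.format()  # str.format() collapses '{{'/'}}' into '{'/'}'
    print(stage2)
    # SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*",
    #   ATTR{address}=="0c:c4:7a:aa:bb:cc", NAME="eno1"
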
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay.yaml
deleted file mode 100644
index c27ccb4..0000000
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay.yaml
+++ /dev/null
@@ -1,572 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') %}
-{# set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' #}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'mcp11-ovs-dpdk.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
-
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.2') %}
-{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.11') %}
-{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.12') %}
-{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.13') %}
-{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.3') %}
-{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.31') %}
-{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.49.5') %}
-{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
-
-{% import 'physical_mcp11_ovs_dpdk/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'physical_mcp11_ovs_dpdk/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'physical_mcp11_ovs_dpdk/underlay--user-data.yaml' as CLOUDINIT_USER_DATA with context %}
-{% import 'physical_mcp11_ovs_dpdk/underlay--user-data-hwe.yaml' as CLOUDINIT_USER_DATA_HWE with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
- - &cloudinit_user_data_hwe {{ CLOUDINIT_USER_DATA_HWE }}
-
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'physical_mcp11_ovs_dpdk_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.0/26:26') }}
-        params:
-          ip_reserved:
-            gateway: +62
-            l2_network_device: +61
-            default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
-            default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
-            default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
-            default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
-            default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
-            default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
-            default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
-            default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
-            virtual_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
-            virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
-            virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
-            virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
-            virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
-            virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
-            virtual_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
-            virtual_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
-          #ip_ranges:
-          #    dhcp: [+2, -4]
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.4.0/24:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.6.0/24:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.128/26:26') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: -2
-
-    groups:
-
-      - name: virtual
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-
-        network_pools:
-          admin: admin-pool01
-
-        l2_network_devices:
-          # Ironic management interface
-          admin:
-            address_pool: admin-pool01
-            dhcp: false
-            parent_iface:
-              phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
-
-        group_volumes:
-         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
-           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           format: qcow2
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fall back to IMAGE_PATH1604
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_cfg01
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
-                #- label: ens4
-                #  l2_network_device: private
-                #  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                #ens4:
-                #  networks:
-                #    - private
-
-
-      - name: default
-        driver:
-          name: devops_driver_ironic
-          params:
-            os_auth_token: fake-token
-            ironic_url: !os_env IRONIC_URL  # URL that will be used by fuel-devops
-                                            # to access the Ironic API
-            # Agent URL that must be accessible from the deploying node when nodes
-            # are bootstrapped with PXE. Usually the PXE/provision network address is used.
-            agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
-            agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
-
-        network_pools:
-          admin: admin-pool01
-
-        nodes:
-
-        #  - name: {{ HOSTNAME_CFG01 }}
-        #    role: salt_master
-        #    params:
-        #      ipmi_user: !os_env IPMI_USER
-        #      ipmi_password: !os_env IPMI_PASSWORD
-        #      ipmi_previlegies: OPERATOR
-        #      ipmi_host: !os_env IPMI_HOST_CFG01  # hostname or IP address
-        #      ipmi_lan_interface: lanplus
-        #      ipmi_port: 623
-
-        #      root_volume_name: system     # see 'volumes' below
-        #      cloud_init_volume_name: iso  # see 'volumes' below
-        #      cloud_init_iface_up: enp3s0f1  # see 'interfaces' below.
-        #      volumes:
-        #        - name: system
-        #          capacity: !os_env NODE_VOLUME_SIZE, 200
-
-        #          # As with the agent URL, this is a URL of the image that should be
-        #          # used to deploy the node. It must also be accessible from the deploying
-        #          # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-        #          source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-        #          source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-        #        - name: iso  # Volume with name 'iso' will be used
-        #                     # to store the image with cloud-init metadata.
-
-        #          cloudinit_meta_data: *cloudinit_meta_data
-        #          cloudinit_user_data: *cloudinit_user_data_cfg01
-
-        #      interfaces:
-        #        - label: enp3s0f0  # Infra interface
-        #          mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
-        #        - label: enp3s0f1
-        #          l2_network_device: admin
-        #          mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
-
-        #      network_config:
-        #        enp3s0f0:
-        #          networks:
-        #           - infra
-        #        enp3s0f1:
-        #          networks:
-        #           - admin
-
-          - name: {{ HOSTNAME_KVM01 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_KVM01  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL of the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: enp2s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
-                - label: enp2s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
-
-              network_config:
-                enp2s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp2s0f1
-
-          - name: {{ HOSTNAME_KVM02 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_KVM02  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL of the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: enp2s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
-                - label: enp2s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
-
-              network_config:
-                enp2s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp2s0f1
-
-          - name: {{ HOSTNAME_KVM03 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_KVM03  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: eno1  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL of the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: eno1
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
-                - label: eno2
-                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
-
-              network_config:
-                eno1:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - eno2
-
-
-          - name: {{ HOSTNAME_CMP001 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP001  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp3s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL of the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_hwe
-
-              interfaces:
-                - label: enp3s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
-                - label: enp3s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
-                - label: enp5s0f0
-                  mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
-                - label: enp5s0f1
-                  mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
-                  features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
-                - label: enp5s0f2
-                  mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
-                  features: ['dpdk', 'dpdk_pci: 0000:05:00.2']
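-                  # The 'dpdk' / 'dpdk_pci: <pci-address>' feature tags are
-                  # consumed outside this file; the apparent intent is to mark
-                  # NICs handed over to OVS-DPDK together with the PCI device
-                  # to bind (an inferred reading, not confirmed here).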
-
-              network_config:
-                enp3s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp3s0f1
-                   - enp5s0f0
-
-
-
-          - name: {{ HOSTNAME_CMP002 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP002  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: eno1  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL of the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_hwe
-
-              interfaces:
-                - label: eno1
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
-                - label: eth0
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
-                - label: eth3
-                  mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
-                - label: eth2
-                  mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
-                  features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
-                - label: eth4
-                  mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
-                  features: ['dpdk', 'dpdk_pci: 0000:0b:00.0']
-
-              network_config:
-                eno1:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - eth0
-                   - eth3
-
-
-          - name: {{ HOSTNAME_GTW01 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_GTW01  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp3s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL of the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_hwe
-
-              interfaces:
-                - label: enp3s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_GTW01
-                - label: enp3s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_GTW01
-
-              network_config:
-                enp3s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp3s0f1
-
-          - name: {{ HOSTNAME_GTW02 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_GTW02  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: eno1  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL of the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_hwe
-
-              interfaces:
-                - label: eno1
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_GTW02
-                - label: eno2
-                  mac_address: !os_env ETH1_MAC_ADDRESS_GTW02
-
-              network_config:
-                eno1:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - eno2
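
Note on the ip_reserved entries in the address pools above: offsets such as '+62', '+61' and '-2' are presumably resolved by fuel-devops relative to the pool network, positive values counting from the network address and negative values back from the broadcast address. A quick sketch of that reading with the stdlib ipaddress module (the offset semantics are an assumption, not taken from the fuel-devops source):

    # Resolve '+N'/'-N' ip_reserved offsets against the default pool networks.
    import ipaddress

    admin = ipaddress.ip_network('172.16.49.0/26')    # admin-pool01 default
    ext = ipaddress.ip_network('172.17.42.128/26')    # external-pool01 default

    def reserved(net, offset):
        # int('+62') == 62 indexes from the network address;
        # int('-2') == -2 indexes back from the broadcast address.
        return net[int(offset)]

    print(reserved(admin, '+62'))   # gateway           -> 172.16.49.62
    print(reserved(admin, '+61'))   # l2_network_device -> 172.16.49.61
    print(reserved(ext, '-2'))      # l2_network_device -> 172.17.42.190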