Merge branch 'master' of https://github.com/Mirantis/tcp-qa into add_k8s_contrail_test
diff --git a/tcp_tests/managers/common_services_manager.py b/tcp_tests/managers/common_services_manager.py
index 658657a..ec9cdc2 100644
--- a/tcp_tests/managers/common_services_manager.py
+++ b/tcp_tests/managers/common_services_manager.py
@@ -31,4 +31,4 @@
     def install(self, commands):
         self.execute_commands(commands,
                               label='Install common services')
-        self.__config.common_services.common_services_installed = True
+        self.__config.openstack.openstack_installed = True
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index d1b3ec5..b323301 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -1,4 +1,5 @@
 git+git://github.com/openstack/fuel-devops.git
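+# Ironic driver for fuel-devops (used by the 'devops_driver_ironic' groups in the bare-metal templates)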
+git+git://github.com/dis-xcom/fuel-devops-driver-ironic
 paramiko
 six
 requests>=2.2.0
diff --git a/tcp_tests/templates/ironic_standalone/underlay.yaml b/tcp_tests/templates/ironic_standalone/underlay.yaml
index 664665c..0753174 100644
--- a/tcp_tests/templates/ironic_standalone/underlay.yaml
+++ b/tcp_tests/templates/ironic_standalone/underlay.yaml
@@ -67,7 +67,7 @@
             address_pool: provisioning-pool01
             dhcp: false
             parent_iface:
-              phys_dev: !os_env BAREMETAL_ADMIN_IFACE, enp8s0f1
+              phys_dev: !os_env IRONIC_LAB_PXE_IFACE
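+              # (no default here; IRONIC_LAB_PXE_IFACE must be set, e.g. enp8s0f1)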
 
 
         nodes:
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/common-services.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/common-services.yaml
new file mode 100644
index 0000000..742a4d0
--- /dev/null
+++ b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'mk24_lab_ovs_dvr_vlan_bm/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
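+# Keepalived is applied to a single controller (*01*) first, presumably so
+# the VIP comes up on a known node before the rest of the cluster joins.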
+- description: Install keepalived on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster and *01*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Check the VIP
+  cmd: |
+    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install glusterfs
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.service
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Setup glusterfs on primary controller
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check the gluster status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check the rabbitmq status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on first server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:master' state.sls galera
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on other servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:slave' state.sls galera
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check mysql status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+- description: Install haproxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' state.sls haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check haproxy status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.status haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Restart rsyslog
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.restart rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install memcached on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@memcached:server' state.sls memcached
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/openstack.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/openstack.yaml
new file mode 100644
index 0000000..e791352
--- /dev/null
+++ b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/openstack.yaml
@@ -0,0 +1,169 @@
+{% from 'mk24_lab_ovs_dvr_vlan_bm/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install OpenStack control services
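+# Note: '-b 1' below is salt's batch mode (apply to one minion at a time),
+# used here for the ordered first run of clustered services.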
+
+- description: Install glance on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@glance:server' state.sls glance -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
+- description: Restart apache due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Check apache status due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glance:server' state.sls glusterfs.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:client' state.sls keystone.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check keystone service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; openstack service list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check glance image-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install nova on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nova:controller' state.sls nova -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check nova service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install cinder
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:controller' state.sls cinder -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check cinder list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install neutron service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:server' state.sls neutron -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install neutron on gtw node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:gateway' state.sls neutron
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Check neutron agent-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@heat:server' state.sls heat -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; heat resource-type-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Deploy horizon dashboard
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@horizon:server' state.sls horizon
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Deploy nginx proxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nginx:server' state.sls nginx
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Re-apply formulas for compute node (as in the deployment docs)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Check IP on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+    'ip a'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 10, delay: 30}
+  skip_fail: false
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/salt.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/salt.yaml
new file mode 100644
index 0000000..ebf2825
--- /dev/null
+++ b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/salt.yaml
@@ -0,0 +1,341 @@
+{% from 'mk24_lab_ovs_dvr_vlan_bm/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'mk24_lab_ovs_dvr_vlan_bm/underlay.yaml' import REPOSITORY_SUITE with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/qa') %}
+{% set SALT_MODELS_COMMIT = os_env('SALT_MODELS_COMMIT','master') %}
+
+# Address pools for reclass cluster model are taken in the following order:
+# 1. environment variables,
+# 2. config.underlay.address_pools based on fuel-devops address pools
+#    (see generated '.ini' file after underlay is created),
+# 3. defaults
+{% set address_pools = config.underlay.address_pools %}
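+# A sketch of that lookup order for a hypothetical pool (the variable and
+# pool names are illustrative):
+{# set ipv4_net_admin = os_env('IPV4_NET_ADMIN', address_pools.get('admin-pool01', '10.70.0.0/16')) #}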
+
+# Install salt to the config node
+
+#- description: (moved to cloud-init config) Configure repository on the cfg01 node
+#  cmd:
+#    echo "172.18.248.114    jenkins.mcp.mirantis.net  gerrit.mcp.mirantis.net" >> /etc/hosts;
+#    echo "185.135.196.10    apt-mk.mirantis.com" >> /etc/hosts;
+#    echo "nameserver 172.18.208.44 >> /etc/resolv.conf;
+#    echo "nameserver 8.8.8.8 >> /etc/resolv.conf;
+#    which wget >/dev/null || (apt-get update; apt-get install -y wget);
+#    echo "deb [arch=amd64] http://apt-mk.mirantis.com/xenial nightly salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+#    wget -O - http://apt-mk.mirantis.com/public.gpg | apt-key add -;
+#    echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+#    wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 1}
+#  skip_fail: false
+
+#- description: Update packages on cfg01
+#  cmd: apt-get clean; eatmydata apt-get update
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 1}
+#  skip_fail: false
+
+- description: Install salt-master on cfg01
+  cmd: eatmydata apt-get install -y reclass git salt-master
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+#- description: (moved to cloud-init config) Install common packages on cfg01
+#  cmd: eatmydata apt-get install -y python-pip wget curl tmux byobu iputils-ping traceroute htop tree
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 1}
+#  skip_fail: false
+
+- description: Configure salt-master on cfg01
+  cmd: |
+    cat << 'EOF' >> /etc/salt/master.d/master.conf
+    file_roots:
+      base:
+      - /usr/share/salt-formulas/env
+    pillar_opts: False
+    open_mode: True
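+    # The same reclass definition is reused for ext_pillar and master_tops
+    # below via the YAML anchor '&reclass' and alias '*reclass'.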
+    reclass: &reclass
+      storage_type: yaml_fs
+      inventory_base_uri: /srv/salt/reclass
+    ext_pillar:
+      - reclass: *reclass
+    master_tops:
+      reclass: *reclass
+    EOF
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Configure GIT settings and certificates
+  cmd: touch /root/.git_trusted_certs.pem;
+    for server in git.tcpcloud.eu github.com; do
+        openssl s_client -showcerts -connect $server:443 </dev/null
+        | openssl x509 -outform PEM
+        >> /root/.git_trusted_certs.pem;
+    done;
+    HOME=/root git config --global http.sslCAInfo /root/.git_trusted_certs.pem;
+    HOME=/root git config --global user.email "tcp-qa@example.com";
+    HOME=/root git config --global user.name "TCP QA";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+
+- description: Clone reclass models with submodules
+  cmd: |
+    ssh-keyscan -H github.com >> ~/.ssh/known_hosts;
+    git clone -b {{ SALT_MODELS_COMMIT }} --recurse-submodules {{ SALT_MODELS_REPOSITORY }} /srv/salt/reclass;
+
+    mkdir -p /srv/salt/reclass/classes/service;
+
+    # First replace the networks with intermediate keywords to avoid collisions
+    # between already-replaced and not-yet-replaced networks.
+    # For example, if the generated IPV4_NET_ADMIN_PREFIX=10.16.0, there is a risk of replacing twice:
+    # 192.168.10 -> 10.16.0 (generated network for admin)
+    # 10.16.0 -> <external network>
+    # So replace the constant networks with keywords first, then the keywords with the desired networks.
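+    # A sketch of that two-step replacement (the keyword and prefixes are
+    # illustrative, not the model's real values):
+    #   find /srv/salt/reclass/ -type f -exec sed -i 's/192\.168\.10\./==ADMIN_PREFIX==./g' {} +
+    #   find /srv/salt/reclass/ -type f -exec sed -i 's/==ADMIN_PREFIX==\./10.16.0./g' {} +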
+
+    find /srv/salt/reclass/ -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} +
+
+    # Disable checking out the model from the remote repository
+    cat << 'EOF' >> /srv/salt/reclass/nodes/{{ HOSTNAME_CFG01 }}.yml
+    # local storage
+      reclass:
+        storage:
+          data_source:
+            engine: local
+    EOF
+
+    # Show the changes to the console
+    cd /srv/salt/reclass/; git diff
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Configure reclass
+  cmd: |
+    FORMULA_PATH=${FORMULA_PATH:-/usr/share/salt-formulas};
+    FORMULA_REPOSITORY=${FORMULA_REPOSITORY:-deb [arch=amd64] http://apt-mk.mirantis.com/xenial stable salt};
+    FORMULA_GPG=${FORMULA_GPG:-http://apt-mk.mirantis.com/public.gpg};
+    which wget > /dev/null || (apt-get update; apt-get install -y wget);
+    echo "${FORMULA_REPOSITORY}" > /etc/apt/sources.list.d/mcp_salt.list;
+    wget -O - "${FORMULA_GPG}" | apt-key add -;
+    apt-get clean; apt-get update;
+    [ ! -d /srv/salt/reclass/classes/service ] && mkdir -p /srv/salt/reclass/classes/service;
+    declare -a formula_services=("linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon");
+    echo -e "\nInstalling all required salt formulas\n";
+    eatmydata apt-get install -y "${formula_services[@]/#/salt-formula-}";
+    for formula_service in "${formula_services[@]}"; do
+      echo -e "\nLink service metadata for formula ${formula_service} ...\n";
+      [ ! -L "/srv/salt/reclass/classes/service/${formula_service}" ] && ln -s ${FORMULA_PATH}/reclass/service/${formula_service} /srv/salt/reclass/classes/service/${formula_service};
+    done;
+    [ ! -d /srv/salt/env ] && mkdir -p /srv/salt/env;
+    [ ! -L /srv/salt/env/prd ] && ln -s ${FORMULA_PATH}/env /srv/salt/env/prd;
+    [ ! -d /etc/reclass ] && mkdir /etc/reclass;
+
+    cat << 'EOF' >> /etc/reclass/reclass-config.yml
+    storage_type: yaml_fs
+    pretty_print: True
+    output: yaml
+    inventory_base_uri: /srv/salt/reclass
+    EOF
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Restart salt-master service
+  cmd: |
+     systemctl restart salt-master;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{% for ssh in config.underlay.ssh %}
+- description: Configure salt-minion on {{ ssh['node_name'] }}
+  cmd: |
+    [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d;
+    cat << "EOF" >> /etc/salt/minion.d/minion.conf
+    id: {{ ssh['node_name'] }}
+    master: {{ config.salt.salt_master_host }}
+    EOF
+    eatmydata apt-get install -y salt-minion;
+    echo "Check for system info and metadata availability ...";
+    salt-call --no-color grains.items;
+    salt-call --no-color pillar.items;
+  node_name: {{ ssh['node_name'] }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+{% endfor %}
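+# For reference, the rendered /etc/salt/minion.d/minion.conf on a node would
+# contain (hostname illustrative):
+#   id: cfg01.mk24_lab_ovs_dvr_vlan_bm.local
+#   master: <config.salt.salt_master_host>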
+
+
+- description: Accept salt keys from all the nodes
+  cmd: salt-key -A -y
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+- description: Configure salt adapters on cfg01
+  cmd: |
+    ln -s /usr/lib/python2.7/dist-packages/reclass/adapters/salt.py /usr/local/sbin/reclass-salt;
+    chmod +x /usr/lib/python2.7/dist-packages/reclass/adapters/salt.py
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+
+# Prepare salt services and nodes settings
+- description: Run 'linux' formula on cfg01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls linux;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Run 'openssh' formula on cfg01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls openssh;
+    salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+    yes/' /etc/ssh/sshd_config && service ssh restart";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+
+- description: '*Workaround* of the bug https://mirantis.jira.com/browse/PROD-7962'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    '*' cmd.run "echo '    StrictHostKeyChecking no' >> /root/.ssh/config"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Run 'salt.master' formula on cfg01
+  cmd: timeout 120 salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls salt.master.service;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Run 'salt' formula on cfg01 with the workaround proposed in PROD-10894
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls salt;
+    salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' saltutil.sync_all
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 5}
+  skip_fail: false
+
+- description: Generate inventory for all nodes into /srv/salt/reclass/nodes/_generated
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls reclass
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+
+- description: (ovs-dvr-vlan model specific) Execute 'libvirt' states to create the necessary libvirt networks
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Create VMs for control plane
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+
+- description: Refresh pillars on all minions
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all salt resources
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Show reclass-salt --top
+  cmd: reclass-salt --top
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Execute salt.minion.cert
+  cmd: salt-call --no-color state.sls salt.minion.cert -l info;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+# Bootstrap all nodes
+
+- description: Configure linux on other nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
+    cfg01*' state.sls linux
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure openssh on all nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
+    cfg01*' state.sls openssh; salt --hard-crash --state-output=mixed --state-verbose=False
+    -C '* and not cfg*' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+    yes/' /etc/ssh/sshd_config && service ssh restart"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure salt.minion on other nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
+    cfg01*' state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: Check salt minion versions on slaves
+  cmd: salt '*' test.version
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check salt top states on nodes
+  cmd: salt '*' state.show_top
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure ntp and rsyslog on nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls ntp,rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+
+#- description: Hack gtw node
+#  cmd: salt 'gtw*' cmd.run "ip addr del {{ IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
+
+#- description: Hack cmp01 node
+#  cmd: salt 'cmp01*' cmd.run "ip addr del {{ IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
+
+#- description: Hack cmp02 node
+#  cmd: salt 'cmp02*' cmd.run "ip addr del {{ IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
diff --git a/tcp_tests/templates/virtual_mcp11_k8s_calico/underlay--meta-data.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--meta-data.yaml
similarity index 100%
rename from tcp_tests/templates/virtual_mcp11_k8s_calico/underlay--meta-data.yaml
rename to tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual_mcp11_k8s_calico/underlay--user-data-cfg01.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data-cfg01.yaml
similarity index 76%
rename from tcp_tests/templates/virtual_mcp11_k8s_calico/underlay--user-data-cfg01.yaml
rename to tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data-cfg01.yaml
index 443e50c..f2cf8ca 100644
--- a/tcp_tests/templates/virtual_mcp11_k8s_calico/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data-cfg01.yaml
@@ -30,9 +30,9 @@
    - sudo resolvconf -u
 
    # Prepare network connection
-   - sudo ifup ens3
+   - sudo ifup enp8s0f0
    #- sudo route add default gw {gateway} {interface_name}
-   - sudo ifup ens4
+   - sudo ifup enp8s0f1
 
    # Create swap
    - fallocate -l 4G /swapfile
@@ -41,6 +41,27 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   - echo "Preparing base OS"
+
+   - echo "172.18.248.114    jenkins.mcp.mirantis.net  gerrit.mcp.mirantis.net" >> /etc/hosts;
+   - echo "185.135.196.10    apt-mk.mirantis.com" >> /etc/hosts;
+   - echo "nameserver 172.18.208.44 >> /etc/resolv.conf;
+   - echo "nameserver 8.8.8.8 >> /etc/resolv.conf;
+   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+   - echo "deb [arch=amd64] http://apt-mk.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   - wget -O - http://apt-mk.mirantis.com/public.gpg | apt-key add -;
+   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   - apt-get clean
+   - apt-get update
+
+   # Install common packages
+   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
+
    ########################################################
    # Node is ready, allow SSH access
    - echo "Allow SSH access ..."
@@ -50,10 +71,10 @@
   write_files:
    - path: /etc/network/interfaces
      content: |
-          auto ens3
-          iface ens3 inet dhcp
-          auto ens4
-          iface ens4 inet dhcp
+          auto enp8s0f0
+          iface enp8s0f0 inet dhcp
+          auto enp8s0f1
+          iface enp8s0f1 inet manual
 
    - path: /root/.ssh/id_rsa
      owner: root:root
diff --git a/tcp_tests/templates/virtual_mcp11_k8s_calico/underlay--user-data1604.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data1604.yaml
similarity index 61%
rename from tcp_tests/templates/virtual_mcp11_k8s_calico/underlay--user-data1604.yaml
rename to tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data1604.yaml
index 1013571..dd6c909 100644
--- a/tcp_tests/templates/virtual_mcp11_k8s_calico/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data1604.yaml
@@ -32,9 +32,9 @@
    - sudo resolvconf -u
 
    # Prepare network connection
-   - sudo ifup ens3
+   - sudo ifup enp8s0f0
    #- sudo route add default gw {gateway} {interface_name}
-   - sudo ifup ens4
+   - sudo ifup enp8s0f1
 
    # Create swap
    - fallocate -l 4G /swapfile
@@ -43,56 +43,33 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
 
+
+   ############## TCP Cloud base node ##################
+   #- sleep 120
+   - echo "Preparing base OS"
+   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+   - echo "deb [arch=amd64] http://apt-mk.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   - wget -O - http://apt-mk.mirantis.com/public.gpg | apt-key add -;
+   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list
+   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -
+
+   - apt-get clean
+   - eatmydata apt-get update && apt-get -y upgrade
+
+   # Install common packages
+   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
    ########################################################
    # Node is ready, allow SSH access
    - echo "Allow SSH access ..."
    - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
    ########################################################
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-   - echo "deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ xenial main security extra tcp tcp-salt" > /etc/apt/sources.list
-   - wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -
-   # saltstack repo is for minions that have the same version in the xenial and trusty (2016.3.3)
-   #- echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest xenial main" > /etc/apt/sources.list.d/saltstack.list
-   #- wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -
-
-   - apt-get clean
-   - apt-get update
-   - apt-get -y upgrade
-
-   # Install common packages
-   - apt-get install -y python-pip git
-   - apt-get install -y curl tmux byobu iputils-ping traceroute htop tree
-
-   - apt-get install -y salt-minion
-
-   # To be configured from inventory/fuel-devops by operator or autotests
-   - 'echo "id: {hostname}" >> /etc/salt/minion'
-   - 'echo "master: 192.168.10.100" >> /etc/salt/minion'
-
-   - echo "Restarting minion service with workarounds..."
-   - rm -f /etc/salt/pki/minion/minion_master.pub
-   - service salt-minion restart
-   - sleep 5
-   - rm -f /etc/salt/pki/minion/minion_master.pub
-   - service salt-minion restart
-
-   #- echo "Showing node metadata..."
-   #- salt-call pillar.data
-
-   #- echo "Running complete state ..."
-   #- salt-call state.sls linux,openssh,salt
-
   write_files:
    - path: /etc/network/interfaces
      content: |
-          auto ens3
-          iface ens3 inet dhcp
-          auto ens4
-          iface ens4 inet dhcp
-
+          auto enp8s0f0
+          iface enp8s0f0 inet dhcp
+          auto enp8s0f1
+          iface enp8s0f1 inet dhcp
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay.yaml
new file mode 100644
index 0000000..0c0910e
--- /dev/null
+++ b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay.yaml
@@ -0,0 +1,365 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'stable') %}
+
+{% import 'mk24_lab_ovs_dvr_vlan_bm/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'mk24_lab_ovs_dvr_vlan_bm/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'mk24_lab_ovs_dvr_vlan_bm/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'mk24_lab_ovs_dvr_vlan_bm') + '.local' %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'mk24_lab_ovs_dvr_vlan_bm_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.18.173.96/27:27') }}
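+        # In fuel-devops pool notation '<network>:<prefixlen>', subnets of the
+        # given prefix length are allocated from the network; here the whole
+        # /27 is used as a single subnet.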
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_KVM01 }}: 172.18.173.112
+            default_{{ HOSTNAME_KVM02 }}: 172.18.173.113
+            default_{{ HOSTNAME_KVM03 }}: 172.18.173.114
+            default_{{ HOSTNAME_CMP01 }}: 172.18.173.115
+            default_{{ HOSTNAME_CMP02 }}: 172.18.173.116
+            default_{{ HOSTNAME_GTW01 }}: 172.18.173.117
+            default_{{ HOSTNAME_CFG01 }}: 172.18.173.118
+
+    groups:
+      - name: default
+        driver:
+          name: devops_driver_ironic
+          params:
+            os_auth_token: fake-token
+            ironic_url: !os_env IRONIC_URL  # URL that will be used by fuel-devops
+                                            # to access the Ironic API
+            # Agent URLs that are accessible from the deploying node when nodes
+            # are bootstrapped with PXE. Usually the PXE/provision network address is used.
+            agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
+            agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
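+            # Illustrative example of the environment expected by this driver
+            # (the endpoints are assumptions, not real lab values):
+            #   export IRONIC_URL=http://10.0.175.2:6385/
+            #   export IRONIC_AGENT_KERNEL_URL=http://10.0.175.2:8080/tinyipa.vmlinuz
+            #   export IRONIC_AGENT_RAMDISK_URL=http://10.0.175.2:8080/tinyipa.gz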
+
+        network_pools:
+          admin: admin-pool01
+
+        l2_network_devices:
+          admin:
+            address_pool: admin-pool01
+
+
+        nodes:
+
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_CFG01  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp8s0f1  # see 'interfaces' below.
+                                             # this interface is passed to 'user-data'
+                                             # to substitute the {interface_name} variable if it is used there
+
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is a URL to the image that should be
+                  # used to deploy the node. It should also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: enp8s0f1
+                  l2_network_device: admin
+                  mac_address: !os_env PXE_MAC_ADDRESS_CFG01
+
+              network_config:
+                enp8s0f1:
+                  networks:
+                   - admin
+
+          - name: {{ HOSTNAME_KVM01 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM01  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp8s0f1  # see 'interfaces' below.
+                                             # this interface is passed to 'user-data'
+                                             # to substitute the {interface_name} variable if it is used there
+
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is a URL to the image that should be
+                  # used to deploy the node. It should also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: enp8s0f1
+                  l2_network_device: admin
+                  mac_address: !os_env PXE_MAC_ADDRESS_KVM01
+
+              network_config:
+                enp8s0f1:
+                  networks:
+                   - admin
+
+
+          - name: {{ HOSTNAME_KVM02 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM02  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp8s0f1  # see 'interfaces' below.
+                                             # this interface is passed to 'user-data'
+                                             # to substitute the {interface_name} variable if it is used there
+
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is a URL to the image that should be
+                  # used to deploy the node. It should also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: enp8s0f1
+                  l2_network_device: admin
+                  mac_address: !os_env PXE_MAC_ADDRESS_KVM02
+
+              network_config:
+                enp8s0f1:
+                  networks:
+                   - admin
+
+
+          - name: {{ HOSTNAME_KVM03 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM03  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp8s0f1  # see 'interfaces' below.
+                                             # this interface is passed to 'user-data'
+                                             # to substitute the {interface_name} variable if it is used there
+
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is a URL to the image that should be
+                  # used to deploy the node. It should also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: enp8s0f1
+                  l2_network_device: admin
+                  mac_address: !os_env PXE_MAC_ADDRESS_KVM03
+
+              network_config:
+                enp8s0f1:
+                  networks:
+                   - admin
+
+
+          - name: {{ HOSTNAME_CMP01 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_CMP01  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp8s0f1  # see 'interfaces' below.
+                                             # this interface is passed to 'user-data'
+                                             # to substitute the {interface_name} variable if it is used there
+
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is a URL to the image that should be
+                  # used to deploy the node. It should also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: enp8s0f1
+                  l2_network_device: admin
+                  mac_address: !os_env PXE_MAC_ADDRESS_CMP01
+
+              network_config:
+                enp8s0f1:
+                  networks:
+                   - admin
+
+
+          - name: {{ HOSTNAME_CMP02 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_CMP02  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp8s0f1  # see 'interfaces' below.
+                                             # this interface is passed to 'user-data'
+                                             # to substitute the {interface_name} variable if it is used there
+
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is a URL to the image that should be
+                  # used to deploy the node. It should also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: enp8s0f1
+                  l2_network_device: admin
+                  mac_address: !os_env PXE_MAC_ADDRESS_CMP02
+
+              network_config:
+                enp8s0f1:
+                  networks:
+                   - admin
+
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_GTW01  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp8s0f1  # see 'interfaces' below.
+                                             # this interface is passed to 'user-data'
+                                             # to substitute the {interface_name} variable if it is used there
+
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is a URL to the image that should be
+                  # used to deploy the node. It should also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: enp8s0f1
+                  l2_network_device: admin
+                  mac_address: !os_env PXE_MAC_ADDRESS_GTW01
+
+              network_config:
+                enp8s0f1:
+                  networks:
+                   - admin
+
+
diff --git a/tcp_tests/templates/virtual_mcp11_k8s_calico/common-services.yaml b/tcp_tests/templates/virtual_mcp11_k8s_calico/common-services.yaml
deleted file mode 100644
index 7c57f0b..0000000
--- a/tcp_tests/templates/virtual_mcp11_k8s_calico/common-services.yaml
+++ /dev/null
@@ -1,96 +0,0 @@
-{% from 'mcp-fuel-aio/map.jinja' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Create and distribute SSL certificates for services using salt state
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls salt
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description:  Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Check the VIP
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' cmd.run 'ip a | grep 172.16.10.2' | grep -B1 172.16.10.2
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install haproxy.service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install docker
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@docker:host' state.sls docker.host
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check docker
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@docker:host' cmd.run 'docker ps'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install etcd
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@etcd:server' state.sls etcd.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the etcd health
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@etcd:server' cmd.run '. /var/lib/etcd/configenv && etcdctl cluster-health'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Kubernetes and Calico
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons;
-     salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kubernetes:pool' state.sls kubernetes.pool;
-     salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kubernetes:pool' cmd.run 'calicoctl node status';
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup NAT for Calico
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@etcd:server' --subset 1 state.sls etcd.server.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Run whole master to check consistency
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Register addons
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:master' --subset 1 state.sls kubernetes.master.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
diff --git a/tcp_tests/templates/virtual_mcp11_k8s_calico/map.jinja b/tcp_tests/templates/virtual_mcp11_k8s_calico/map.jinja
deleted file mode 100644
index 856c45b..0000000
--- a/tcp_tests/templates/virtual_mcp11_k8s_calico/map.jinja
+++ /dev/null
@@ -1,2 +0,0 @@
-{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'mcp-fuel-aio') + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
diff --git a/tcp_tests/templates/virtual_mcp11_k8s_calico/openstack.yaml b/tcp_tests/templates/virtual_mcp11_k8s_calico/openstack.yaml
deleted file mode 100644
index fb37f08..0000000
--- a/tcp_tests/templates/virtual_mcp11_k8s_calico/openstack.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-{% from 'mcp-fuel-aio/map.jinja' import HOSTNAME_CFG01 with context %}
-
-# Install OpenStack control services
-
-- description: Run tests
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run "docker run --rm --net=host -e API_SERVER='http://127.0.0.1:8080' docker-dev-virtual.docker.mirantis.net/mirantis/kubernetes/k8s-conformance:v1.6.1-1_1491395924598 >> e2e.output"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual_mcp11_k8s_calico/salt.yaml b/tcp_tests/templates/virtual_mcp11_k8s_calico/salt.yaml
deleted file mode 100644
index 6254414..0000000
--- a/tcp_tests/templates/virtual_mcp11_k8s_calico/salt.yaml
+++ /dev/null
@@ -1,274 +0,0 @@
-{% from 'mcp-fuel-aio/map.jinja' import HOSTNAME_CFG01 with context %}
-
-# Install salt to the config node
-
-
-- description: Configure repository on the cfg01 node
-  cmd:
-    echo "172.18.248.114    jenkins.mcp.mirantis.net  gerrit.mcp.mirantis.net" >> /etc/hosts;
-    echo "185.135.196.10    apt-mk.mirantis.com" >> /etc/hosts;
-    echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-    echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-    which wget >/dev/null || (apt-get update; apt-get install -y wget);
-    echo "deb [arch=amd64] http://apt-mk.mirantis.com/xenial nightly salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-    wget -O - http://apt-mk.mirantis.com/public.gpg | apt-key add -;
-    echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-    wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-- description: Update packages on cfg01
-  cmd: apt-get clean; apt-get update
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-- description: Installing salt master on cfg01
-  cmd:  apt-get install -y reclass git; apt-get install -y salt-master
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-- description: Install common packages on cfg01
-  cmd: apt-get install -y python-pip wget curl tmux byobu iputils-ping traceroute htop tree
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-- description: Configure salt-master on cfg01
-  cmd: |
-    cat << 'EOF' >> /etc/salt/master.d/master.conf
-    file_roots:
-      base:
-      - /usr/share/salt-formulas/env
-    pillar_opts: False
-    open_mode: True
-    reclass: &reclass
-      storage_type: yaml_fs
-      inventory_base_uri: /srv/salt/reclass
-    ext_pillar:
-      - reclass: *reclass
-    master_tops:
-      reclass: *reclass
-    EOF
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
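
The master config above defines the reclass settings once under the &reclass anchor and reuses them for ext_pillar and master_tops via *reclass. A quick sanity check that the block was appended as intended (a sketch):

    # Sketch: verify the reclass anchor landed in the master config.
    grep -A3 'reclass: &reclass' /etc/salt/master.d/master.conf
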
-
-- description: Configure GIT settings and certificates
-  cmd: touch /root/.git_trusted_certs.pem;
-    for server in git.tcpcloud.eu github.com; do
-        openssl s_client -showcerts -connect $server:443 </dev/null
-        | openssl x509 -outform PEM
-        >> /root/.git_trusted_certs.pem;
-    done;
-    HOME=/root git config --global http.sslCAInfo /root/.git_trusted_certs.pem;
-    HOME=/root git config --global user.email "tcp-qa@example.com";
-    HOME=/root git config --global user.name "TCP QA";
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-
-- description: Clone reclass models with submodules
-  cmd: |
-    ssh-keyscan -H github.com >> ~/.ssh/known_hosts;
-    git clone --recursive https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab /srv/salt/reclass;
-    mkdir -p /srv/salt/reclass/classes/service;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
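
Because the model is cloned with --recursive, the shared system-level classes arrive as a git submodule. A quick check that the clone and its submodules are in place (a sketch):

    # Sketch: confirm the reclass model and its submodules were fetched.
    git -C /srv/salt/reclass log -1 --oneline
    git -C /srv/salt/reclass submodule status
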
-
-- description: Configure reclass
-  cmd: |
-    FORMULA_PATH=${FORMULA_PATH:-/usr/share/salt-formulas};
-    FORMULA_REPOSITORY=${FORMULA_REPOSITORY:-deb [arch=amd64] http://apt-mk.mirantis.com/xenial stable salt};
-    FORMULA_GPG=${FORMULA_GPG:-http://apt-mk.mirantis.com/public.gpg};
-    which wget > /dev/null || (apt-get update; apt-get install -y wget);
-    echo "${FORMULA_REPOSITORY}" > /etc/apt/sources.list.d/mcp_salt.list;
-    wget -O - "${FORMULA_GPG}" | apt-key add -;
-    apt-get clean; apt-get update;
-    [ ! -d /srv/salt/reclass/classes/service ] && mkdir -p /srv/salt/reclass/classes/service;
-    declare -a formula_services=("linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon");
-    echo -e "\nInstalling all required salt formulas\n";
-    apt-get install -y "${formula_services[@]/#/salt-formula-}";
-    for formula_service in "${formula_services[@]}"; do
-      echo -e "\nLink service metadata for formula ${formula_service} ...\n";
-      [ ! -L "/srv/salt/reclass/classes/service/${formula_service}" ] && ln -s ${FORMULA_PATH}/reclass/service/${formula_service} /srv/salt/reclass/classes/service/${formula_service};
-    done;
-    [ ! -d /srv/salt/env ] && mkdir -p /srv/salt/env;
-    [ ! -L /srv/salt/env/prd ] && ln -s ${FORMULA_PATH}/env /srv/salt/env/prd;
-    [ ! -d /etc/reclass ] && mkdir /etc/reclass;
-
-    cat << 'EOF' >> /etc/reclass/reclass-config.yml
-    storage_type: yaml_fs
-    pretty_print: True
-    output: yaml
-    inventory_base_uri: /srv/salt/reclass
-    EOF
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
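
Each salt-formula-* package ships its reclass service metadata under ${FORMULA_PATH}/reclass/service; the loop above only links that metadata into the model. The links can be spot-checked afterwards (a sketch):

    # Sketch: every linked formula service should resolve to a package path.
    for svc in linux reclass salt openssh ntp; do
        readlink "/srv/salt/reclass/classes/service/${svc}"
    done
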
-
-- description: Configure salt-minion on cfg01
-  cmd: |
-    [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d;
-    cat << "EOF" >> /etc/salt/minion.d/minion.conf
-    id: {{ HOSTNAME_CFG01 }}
-    master: 127.0.0.1
-    EOF
-    apt-get install -y salt-minion;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-- description: Configure salt adapters on cfg01
-  cmd: |
-    ln -s /usr/lib/python2.7/dist-packages/reclass/adapters/salt.py /usr/local/sbin/reclass-salt;
-    chmod +x /usr/lib/python2.7/dist-packages/reclass/adapters/salt.py
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-- description: Restart services
-  cmd: |
-     systemctl restart salt-master;
-     systemctl restart salt-minion;
-     echo "Showing system info and metadata ...";
-     salt-call --no-color grains.items;
-     salt-call --no-color pillar.data;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-# Prepare salt services and node settings
-- description: Run 'linux' formula on cfg01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@salt:master' state.sls linux;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Run 'openssh' formula on cfg01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@salt:master' state.sls openssh;
-    salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@salt:master' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
-    yes/' /etc/ssh/sshd_config && service ssh restart";
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-- description: '*Workaround* of the bug https://mirantis.jira.com/browse/PROD-7962'
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    '*' cmd.run "echo '    StrictHostKeyChecking no' >> /root/.ssh/config"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-- description: Run 'reclass' formula on cfg01
-  cmd: timeout 120 salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@salt:master' state.sls reclass;
-    salt-call --no-color state.sls salt.master;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Run 'salt' formula on cfg01
-  cmd: timeout 120 salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@salt:master' state.sls salt.master.service;
-    salt-call --no-color state.sls salt.master,salt.api,salt.minion.ca;
-    systemctl restart salt-minion;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Accept salt keys from all the nodes
-  cmd: salt-key -A -y
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
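
salt-key -A -y accepts whatever happens to be pending, so it is worth confirming that every expected minion actually registered before the inventory is generated (a sketch):

    # Sketch: all nodes should appear under 'Accepted Keys', none unaccepted.
    salt-key -L
    salt-key -l unaccepted
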
-
-- description: Generate inventory for all the nodes under /srv/salt/reclass/nodes/_generated
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@salt:master' state.sls reclass
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Refresh pillars on all minions
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Sync all salt resources
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Show reclass-salt --top
-  cmd: reclass-salt --top; salt-call --no-color state.sls salt.minion.cert -l info;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-# Bootstrap all nodes
-
-- description: Configure linux on controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
-    linux
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Configure linux on cmp
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.sls
-    linux
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 20}
-  skip_fail: false
-
-- description: Configure openssh on all nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
-    cfg*' state.sls openssh;salt --hard-crash --state-output=mixed --state-verbose=False
-    -C '* and not cfg*' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
-    yes/' /etc/ssh/sshd_config && service ssh restart"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Configure salt.minion on ctl
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls salt.minion
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-- description: Configure salt.minion on cmp
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.sls salt.minion
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-- description: Check salt minion versions on slaves
-  cmd: salt '*' test.version
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check salt top states on nodes
-  cmd: salt '*' state.show_top
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Configure ntp and rsyslog on nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls ntp,rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
diff --git a/tcp_tests/templates/virtual_mcp11_k8s_calico/underlay.yaml b/tcp_tests/templates/virtual_mcp11_k8s_calico/underlay.yaml
deleted file mode 100644
index b1e029b..0000000
--- a/tcp_tests/templates/virtual_mcp11_k8s_calico/underlay.yaml
+++ /dev/null
@@ -1,287 +0,0 @@
----
-aliases:
-  default_interface_model:
-    - &interface_model !os_env INTERFACE_MODEL, virtio
-
-{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'mcp-fuel-aio') + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'mcp-fuel-aio') }}
-
-    address_pools:
-      admin-pool01:
-        net: 172.16.10.0/24:24
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +107
-            default_{{ HOSTNAME_CTL02 }}: +108
-            default_{{ HOSTNAME_CTL03 }}: +109
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      private-pool01:
-        net: 192.168.10.0/24:24
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +100
-            default_{{ HOSTNAME_CTL01 }}: +107
-            default_{{ HOSTNAME_CTL02 }}: +108
-            default_{{ HOSTNAME_CTL03 }}: +109
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-          ip_ranges:
-            dhcp: [+90, -10]
-
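
The pools above use fuel-devops relative notation: positive offsets count from the network address and negative ones back from the end of the range. Assuming admin-pool01 renders as 172.16.10.0/24, the reservations resolve roughly as in this sketch (the exact range endpoints depend on the driver's convention):

    # Sketch: resolved addresses for admin-pool01 under 172.16.10.0/24.
    NET=172.16.10
    echo "gateway / l2 device: ${NET}.1"               # +1
    echo "cfg01:               ${NET}.100"             # +100
    echo "cmp01, cmp02:        ${NET}.105, ${NET}.106" # +105, +106
    echo "ctl01..ctl03:        ${NET}.107-${NET}.109"  # +107..+109
    echo "dhcp range:          ${NET}.90 - ${NET}.245" # [+90, -10]
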
-    groups:
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-
-        network_pools:
-          admin: admin-pool01
-          private: private-pool01
-
-        l2_network_devices:
-          private:
-            address_pool: private-pool01
-            dhcp: true
-
-          admin:
-            address_pool: admin-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-        group_volumes:
-         - name: cloudimage1404    # This name is used as the 'backing_store' option for node volumes.
-           source_image: !os_env IMAGE_PATH1404  # https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img or
-                                             # http://apt.tcpcloud.eu/images/ubuntu-14-04-x64-201608231134.qcow2
-           format: qcow2
-         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
-           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img or
-                                             # http://apt.tcpcloud.eu/images/ubuntu-16-04-x64-201608231004.qcow2
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # The volume named 'iso' is used to store
-                             # the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: !include underlay--meta-data.yaml
-                  cloudinit_user_data: !include underlay--user-data-cfg01.yaml
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # The volume named 'iso' is used to store
-                             # the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: !include underlay--meta-data.yaml
-                  cloudinit_user_data: !include underlay--user-data1604.yaml
-
-              interfaces: &interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config: &network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # The volume named 'iso' is used to store
-                             # the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: !include underlay--meta-data.yaml
-                  cloudinit_user_data: !include underlay--user-data1604.yaml
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CTL03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # The volume named 'iso' is used to store
-                             # the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: !include underlay--meta-data.yaml
-                  cloudinit_user_data: !include underlay--user-data1604.yaml
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMP01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # The volume named 'iso' is used to store
-                             # the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: !include underlay--meta-data.yaml
-                  cloudinit_user_data: !include underlay--user-data1604.yaml
-
-
-              interfaces: &all_interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config: &all_network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CMP02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # The volume named 'iso' is used to store
-                             # the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: !include underlay--meta-data.yaml
-                  cloudinit_user_data: !include underlay--user-data1604.yaml
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
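
All tunables in this template are read through !os_env, so rendering it only required exporting the corresponding variables; a sketch with placeholder paths:

    # Sketch: environment consumed by the underlay template via !os_env.
    export ENV_NAME=mcp-fuel-aio
    export LAB_CONFIG_NAME=mcp-fuel-aio
    export IMAGE_PATH1604=/path/to/xenial-server-cloudimg-amd64-disk1.img
    export IMAGE_PATH1404=/path/to/trusty-server-cloudimg-amd64-disk1.img
    export SLAVE_NODE_CPU=4
    export SLAVE_NODE_MEMORY=8192
    export NODE_VOLUME_SIZE=150
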
diff --git a/tox.ini b/tox.ini
index c1ecf7f..e25d14c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -17,7 +17,6 @@
 deps =
     setuptools
     -r{toxinidir}/tcp_tests/requirements.txt
-    -r{toxinidir}/test-requirements.txt
 usedevelop = False
 commands = py.test -s -vvv tcp_tests/tests/unit