Add mk22-qa-lab01 templates
diff --git a/tcp_tests/templates/common-services/mk22-qa-lab01-common-services.yaml b/tcp_tests/templates/common-services/mk22-qa-lab01-common-services.yaml
new file mode 100644
index 0000000..d3980d1
--- /dev/null
+++ b/tcp_tests/templates/common-services/mk22-qa-lab01-common-services.yaml
@@ -0,0 +1,129 @@
+# Install support services
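+#
+# Each step runs `cmd` on `node_name`, retrying up to `retry.count` times with
+# `retry.delay` seconds between attempts; `skip_fail: true` lets the run
+# continue even if the step never succeeds. The salt `-b 1` (batch size) flag
+# applies a state to one minion at a time, keeping clustered services
+# available during the rollout.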
+- description: Install keepalived on primary controller
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' state.sls
+    keepalived
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: true
+- description: Install keepalived on other controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+    keepalived -b 1
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 10, delay: 10}
+  skip_fail: true
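+# The inner grep runs on every ctl minion; the outer grep then filters the
+# aggregated salt output, with -B1 keeping the minion id line printed just
+# above the matching address, so the node holding the VIP is visible.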
+- description: Check the VIP
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run
+    'ip a | grep 172.16.10.254' | grep -B1 172.16.10.254
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: Install keepalived on primary database server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'dbs01*' state.sls
+    keepalived
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: true
+- description: Install keepalived on other database servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'dbs*' state.sls
+    keepalived -b 1
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 10, delay: 10}
+  skip_fail: true
+- description: Check the database VIP
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'dbs*' cmd.run
+    'ip a | grep 172.16.10.252' | grep -B1 172.16.10.252
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+
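+# GlusterFS provides the shared storage for the controllers (mounted later by
+# the glusterfs.client state in the openstack template, e.g. for glance
+# images).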
+- description: Install glusterfs on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+    glusterfs.server.service
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Setup glusterfs on primary controller
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' state.sls
+    glusterfs.server.setup
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Setup glusterfs on other controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+    glusterfs.server.setup -b 1
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Check the gluster status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    'gluster peer status; gluster volume status'
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Install RabbitMQ on all database servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'dbs*' state.sls
+    rabbitmq
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Check the rabbitmq status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'dbs*' cmd.run
+    'rabbitmqctl cluster_status'
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: '*Workaround* Update salt-formula-galera on the config node to the
+    latest version'
+  cmd: apt-get -y --force-yes install salt-formula-galera
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Install Galera on the first database server (dbs01)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'dbs01*' state.sls
+    galera
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Install Galera on all database servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'dbs*' state.sls
+    galera
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
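+# wsrep_incoming_addresses lists the addresses of the Galera cluster members,
+# so a full list below confirms that all dbs nodes joined the cluster.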
+- description: Check mysql status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' mysql.status | grep -A1 wsrep_incoming_addresses
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: true
+- description: Install haproxy on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+    haproxy
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Check haproxy status on controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' service.status
+    haproxy
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Install haproxy on all database servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'dbs*' state.sls
+    haproxy
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Check haproxy status on database servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'dbs*' service.status
+    haproxy
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Install memcached on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+    memcached
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
diff --git a/tcp_tests/templates/opencontrail/mk22-qa-lab01-opencontrail.yaml b/tcp_tests/templates/opencontrail/mk22-qa-lab01-opencontrail.yaml
new file mode 100644
index 0000000..6ea540b
--- /dev/null
+++ b/tcp_tests/templates/opencontrail/mk22-qa-lab01-opencontrail.yaml
@@ -0,0 +1,110 @@
+# Prepare Juniper contrail-test
+
+- description: Allow password authentication for SSH on compute nodes
+  cmd: salt 'cmp*' cmd.run "sed -i -e 's/^PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config ; service ssh restart"
+  node_name: cfg01.mk22-qa-lab01.local
+
+- description: Install contrail-test on ctl01
+  cmd: wget https://raw.githubusercontent.com/Juniper/contrail-test-ci/master/install.sh;
+    chmod +x ./install.sh;
+    ./install.sh install contrail-test --ci-ref R3.0.2.x --fab-ref R3.0.2.x --test-ref R3.0.2.x;
+  node_name: ctl01.mk22-qa-lab01.local
+
+- description: Install OpenJDK-7 for contrail tests
+  cmd: apt-get -y install openjdk-7-jdk
+  node_name: ctl01.mk22-qa-lab01.local
+
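+# testbed.py is the inventory read by the contrail-test fab tasks: it maps the
+# contrail roles (cfgm, control, compute, ...) onto lab hosts and carries the
+# credentials used to reach them.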
+- description: Create testbed.py with lab parameters
+  cmd: |
+    cat << EOF >> /opt/contrail/utils/fabfile/testbeds/testbed.py
+    from fabric.api import env
+
+    #Management ip addresses of hosts in the cluster
+    os_ctrl_1 = 'root@172.16.10.101'
+    os_ctrl_2 = 'root@172.16.10.102'
+    os_ctrl_3 = 'root@172.16.10.103'
+    c_ctrl_1 = 'root@172.16.10.101'
+    c_ctrl_2 = 'root@172.16.10.102'
+    c_ctrl_3 = 'root@172.16.10.103'
+    c_compute_1 = 'root@172.16.10.105'
+    #c_compute_2 = 'root@10.84.30.51'
+    vip_ctrl = 'root@172.16.10.254'
+
+    ext_routers = [
+    ('gateway1','192.168.10.1'),
+    #('gateway2','10.110.1.1'),
+    ]
+
+    #Autonomous system number
+    router_asn = 64512
+
+    #Host from which the fab commands are triggered to install and provision
+    deploy_node = 'root@172.16.10.101'
+
+    #Role definition of the hosts.
+    env.roledefs = {
+    'all': [os_ctrl_1,c_ctrl_1,c_ctrl_2,c_ctrl_3,os_ctrl_2,os_ctrl_3,c_compute_1],
+    'cfgm': [c_ctrl_1,c_ctrl_2,c_ctrl_3],
+    'openstack': [os_ctrl_1,os_ctrl_2,os_ctrl_3],
+    'control': [c_ctrl_1,c_ctrl_2,c_ctrl_3],
+    'compute': [c_compute_1],
+    'collector': [c_ctrl_1,c_ctrl_2,c_ctrl_3],
+    #'webui': [c_ctrl_1,c_ctrl_2,c_ctrl_3],
+    'webui': [vip_ctrl],
+    'database': [c_compute_1],
+    'build': [deploy_node],
+    }
+    #Openstack admin password
+    env.openstack_admin_password = 'admin'
+    env.password = 'r00tme'
+    #Passwords of each host
+    env.passwords = {
+    os_ctrl_1: 'r00tme',
+    c_ctrl_1: 'r00tme',
+    c_ctrl_2: 'r00tme',
+    c_ctrl_3: 'r00tme',
+    os_ctrl_2: 'r00tme',
+    os_ctrl_3: 'r00tme',
+    c_compute_1: 'r00tme',
+    deploy_node: 'r00tme',
+    }
+
+    env.ha = {
+    'internal_vip' : '172.16.10.254',
+    #'external_vip' : '10.84.30.65',
+    'contrail_internal_vip' : '172.16.10.254',
+    #'contrail_external_vip' : '192.168.200.254',
+    }
+
+    multi_tenancy = True
+    env.keystone = {
+    'admin_user' : 'admin',
+    'admin_password' : 'workshop',
+    'service_tenant': 'admin',
+    'admin_tenant' : 'admin',
+    'keystone_ip' : '172.16.10.254',
+    }
+
+    os_username = 'admin'
+    os_password = 'workshop'
+    os_tenant_name = 'admin'
+
+
+    control_data = {
+    os_ctrl_1 : { 'ip': '172.16.10.101/24', 'gw' : '172.16.10.1', 'device':'eth1' },
+    os_ctrl_2 : { 'ip': '172.16.10.102/24', 'gw' : '172.16.10.1', 'device':'eth1' },
+    os_ctrl_3 : { 'ip': '172.16.10.103/24', 'gw' : '172.16.10.1', 'device':'eth1' },
+    c_ctrl_1 : { 'ip': '172.16.10.101/24', 'gw' : '172.16.10.1', 'device':'eth1' },
+    c_ctrl_2 : { 'ip': '172.16.10.102/24', 'gw' : '172.16.10.1', 'device':'eth1' },
+    c_ctrl_3 : { 'ip': '172.16.10.103/24', 'gw' : '172.16.10.1', 'device':'eth1' },
+    c_compute_1 : { 'ip': '172.16.10.105/24', 'gw' : '172.16.10.1', 'device':'eth1' },
+    }
+
+    env.test = {
+    'mail_to': 'user@test.mlx',
+    'mail_server': '10.204.x.x',
+    'mail_sender': 'mailer@juniper.net'
+    }
+    EOF
+
+  node_name: ctl01.mk22-qa-lab01.local
diff --git a/tcp_tests/templates/openstack/mk22-qa-lab01-openstack.yaml b/tcp_tests/templates/openstack/mk22-qa-lab01-openstack.yaml
new file mode 100644
index 0000000..5959d37
--- /dev/null
+++ b/tcp_tests/templates/openstack/mk22-qa-lab01-openstack.yaml
@@ -0,0 +1,219 @@
+# Install OpenStack control services
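+#
+# The usual pattern below: apply a state to ctl01 first, then roll it out to
+# all controllers with `-b 1` (one minion at a time).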
+- description: Install keystone on primary controller
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' state.sls
+    keystone
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Install keystone on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+    keystone -b 1
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Populate keystone services/tenants/admins
+  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls
+    keystone.client
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Check keystone service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonerc; keystone service-list'
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Install glance on primary controller
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' state.sls
+    glance
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Install glance on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+    glance -b 1
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Configure glusterfs.client on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+    glusterfs.client
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Configure (re-install) keystone on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+    keystone -b 1
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Check glance image-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonerc; glance image-list'
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Install cinder on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+    cinder -b 1
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Check cinder list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonerc; cinder list'
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Install nova on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' state.sls
+    nova
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Install nova on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+    nova
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Check nova service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonerc; nova service-list'
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Install neutron on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' state.sls
+    neutron
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Install neutron on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+    neutron
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Check neutron agent-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonerc; neutron agent-list'
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Deploy dashboard on prx*
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'prx*' state.apply
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: true
+- description: Deploy nginx proxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cfg*' state.sls
+    nginx
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: true
+
+# Install contrail on controllers
+
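+# Setting the `noservices` grain makes the salt formulas skip service
+# management (a convention in these formulas), so the states below can write
+# configuration without prematurely (re)starting the contrail services.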
+- description: Workaround for the bug https://mirantis.jira.com/browse/PROD-8164 (opencontrail.database)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' grains.set noservices True
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: Install contrail database on controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+    opencontrail.database -b 1
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Check cassandra status on ctl01
+  cmd: salt 'ctl01*' cmd.run 'nodetool status;nodetool compactionstats;nodetool describecluster;'
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Install opencontrail on controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+    opencontrail -b 1
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Check contrail status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonerc; contrail-status; neutron net-list; nova net-list'
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
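+# provision_control.py registers each controller as a BGP speaker in the
+# contrail API (reached through the VIP on port 8082).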
+- description: Add contrail bgp router on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '/usr/share/contrail-utils/provision_control.py --oper add --api_server_ip 172.16.10.254
+    --api_server_port 8082 --host_name ctl01 --host_ip 172.16.10.101 --router_asn
+    64512 --admin_user admin --admin_password workshop --admin_tenant_name admin'
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Add contrail bgp router on ctl02
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl02*' cmd.run
+    '/usr/share/contrail-utils/provision_control.py --oper add --api_server_ip 172.16.10.254
+    --api_server_port 8082 --host_name ctl02 --host_ip 172.16.10.102 --router_asn
+    64512 --admin_user admin --admin_password workshop --admin_tenant_name admin'
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Add contrail bgp router on ctl03
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl03*' cmd.run
+    '/usr/share/contrail-utils/provision_control.py --oper add --api_server_ip 172.16.10.254
+    --api_server_port 8082 --host_name ctl03 --host_ip 172.16.10.103 --router_asn
+    64512 --admin_user admin --admin_password workshop --admin_tenant_name admin'
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+
+# Install compute node
+
+- description: Workaround for the bug https://mirantis.jira.com/browse/PROD-8164 (opencontrail on compute nodes)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' grains.set noservices True
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Apply formulas for compute nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: true
+- description: Workaround for the bug https://mirantis.jira.com/browse/PROD-8164 (opencontrail on compute nodes)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' grains.set noservices True
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Re-apply (as in the docs) formulas for compute nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: true
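+# provision_vrouter.py registers the compute node's vrouter agent with the
+# contrail API server, so the node appears in contrail-status.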
+- description: Add vrouter for cmp01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '/usr/share/contrail-utils/provision_vrouter.py --oper add --host_name cmp01 --host_ip
+    172.16.10.105 --api_server_ip 172.16.10.254 --api_server_port 8082 --admin_user
+    admin --admin_password workshop --admin_tenant_name admin'
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Reboot compute nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' system.reboot
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+- description: Check IP on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+    'ip a'
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 10, delay: 30}
+  skip_fail: false
+- description: Check contrail status on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+    'contrail-status'
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
diff --git a/tcp_tests/templates/salt/mk22-qa-lab01-salt.yaml b/tcp_tests/templates/salt/mk22-qa-lab01-salt.yaml
new file mode 100644
index 0000000..c559d54
--- /dev/null
+++ b/tcp_tests/templates/salt/mk22-qa-lab01-salt.yaml
@@ -0,0 +1,233 @@
+# Install salt on the config node
+- description: Configure tcpcloud repository on the cfg01 node
+  cmd: echo 'deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ xenial main security tcp tcp-salt' > /etc/apt/sources.list;
+    echo 'deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ trusty tcp-salt' >> /etc/apt/sources.list;
+    wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -;
+    echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest xenial main" > /etc/apt/sources.list.d/saltstack.list;
+    wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -;
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+#- description: Configure tcpcloud and saltstack repositories on the rest of nodes
+#  cmd: echo 'deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ trusty main security extra tcp tcp-salt' > /etc/apt/sources.list;
+#    wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add - ;
+#    echo 'deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/latest trusty main' > /etc/apt/sources.list.d/saltstack.list;
+#    wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
+#  node_name: ***
+#  retry: {count: 1, delay: 1}
+#  skip_fail: false
+
+- description: Update packages on cfg01
+  cmd: apt-get clean; apt-get update && apt-get -y upgrade
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
+- description: Install common packages on cfg01
+  cmd: apt-get install -y python-pip wget curl tmux byobu iputils-ping traceroute htop tree
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
+- description: Install salt formulas, master and minion on cfg01
+  cmd: apt-get install -y salt-formula-* salt-master salt-minion reclass
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
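+# master.conf points file_roots at the packaged formulas and wires reclass in
+# as both the external pillar and the top-file source; the `&reclass` anchor
+# reuses one settings block for ext_pillar and master_tops.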
+- description: Configure salt-master on cfg01
+  cmd: |
+    cat << 'EOF' >> /etc/salt/master.d/master.conf
+    file_roots:
+      base:
+      - /usr/share/salt-formulas/env
+    pillar_opts: False
+    open_mode: True
+    reclass: &reclass
+      storage_type: yaml_fs
+      inventory_base_uri: /srv/salt/reclass
+    ext_pillar:
+      - reclass: *reclass
+    master_tops:
+      reclass: *reclass
+    EOF
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Configure GIT settings and certificates
+  cmd: touch /root/.git_trusted_certs.pem;
+    for server in git.tcpcloud.eu github.com; do
+        openssl s_client -showcerts -connect $server:443 </dev/null
+        | openssl x509 -outform PEM
+        >> /root/.git_trusted_certs.pem;
+    done;
+    HOME=/root git config --global http.sslCAInfo /root/.git_trusted_certs.pem;
+    HOME=/root git config --global user.email "tcp-qa@example.com";
+    HOME=/root git config --global user.name "TCP QA";
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Clone reclass models and perform a workaround for https://mirantis.jira.com/browse/PROD-8078
+  cmd: |
+    git clone https://github.com/Mirantis/mk-lab-salt-model.git /srv/salt/reclass -b dash;
+    cat << 'EOF' >> /srv/salt/reclass/nodes/control/cfg01.mk22-qa-lab01.local.yml
+    # local storage
+      reclass:
+        storage:
+          data_source:
+            engine: local
+    EOF
+    sed -i '/nagios/d' /srv/salt/reclass/classes/system/salt/master/formula/pkg/stacklight.yml
+    cd /srv/salt/reclass; git add -A; git commit -m "use dash repo";
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Configure reclass
+  cmd: |
+    mkdir -p /srv/salt/reclass/classes/service;
+    for i in /usr/share/salt-formulas/reclass/service/*; do
+      ln -s $i /srv/salt/reclass/classes/service/;
+    done;
+    [ ! -d /etc/reclass ] && mkdir /etc/reclass;
+    cat << 'EOF' >> /etc/reclass/reclass-config.yml
+    storage_type: yaml_fs
+    pretty_print: True
+    output: yaml
+    inventory_base_uri: /srv/salt/reclass
+    EOF
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Configure salt-minion on cfg01
+  cmd: |
+    [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d;
+    cat << "EOF" >> /etc/salt/minion.d/minion.conf
+    id: cfg01.mk22-qa-lab01.local
+    master: localhost
+    EOF
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
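+# The minion caches the master's public key on first contact; removing
+# minion_master.pub before restarting forces the minion to re-fetch it and
+# avoids "master public key changed" failures.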
+- description: Restart salt services with workarounds
+  cmd: service salt-master restart;
+    sleep 60;
+    rm -f /etc/salt/pki/minion/minion_master.pub;
+    service salt-minion restart;
+    reclass -n cfg01.mk22-qa-lab01.local;
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
+
+# Prepare salt services and nodes settings
+- description: Run 'linux' formula on cfg01
+  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls
+    linux
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: Run 'openssh' formula on cfg01
+  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls
+    openssh; sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /etc/ssh/sshd_config
+    && service ssh restart
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: '*Workaround* for the bug https://mirantis.jira.com/browse/PROD-7962'
+  cmd: echo '    StrictHostKeyChecking no' >> /root/.ssh/config
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Run 'salt' formula on cfg01
+  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls
+    salt
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: true
+
+- description: Accept salt keys from all the nodes
+  cmd: salt-key -A -y
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Generate inventory for all the nodes in /srv/salt/reclass/nodes/_generated
+  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls
+    reclass.storage
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: Refresh pillars on all minions
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+# Bootstrap all nodes
+- description: Configure linux on all nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls
+    linux
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 5, delay: 5}
+  skip_fail: false
+
+- description: Configure openssh on all nodes except cfg01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
+    cfg*' state.sls openssh;salt --hard-crash --state-output=mixed --state-verbose=False
+    -C '* and not cfg*' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+    yes/' /etc/ssh/sshd_config && service ssh restart"
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: '*Workaround* for the bug https://mirantis.jira.com/browse/PROD-8025'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run 'apt-get
+    update && apt-get -y upgrade'
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: true
+
+- description: '*Workaround* for the bug https://mirantis.jira.com/browse/PROD-8021'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run 'apt-get
+    -y install linux-image-extra-$(uname -r)'
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: '*Workaround* for the bug https://mirantis.jira.com/browse/PROD-8025'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run 'apt-get
+    -y install python-requests'
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+#- description: '*Workaround* of the bug https://mirantis.jira.com/browse/PROD-8063'
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run 'dhclient
+#    -r;dhclient'
+#  node_name: cfg01.mk22-qa-lab01.local
+#  retry: {count: 1, delay: 1}
+#  skip_fail: false
+
+- description: Configure salt.minion on all nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls
+    salt.minion
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: Configure ntp on all nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls
+    ntp
+  node_name: cfg01.mk22-qa-lab01.local
+  retry: {count: 5, delay: 10}
+  skip_fail: false
diff --git a/tcp_tests/templates/underlay/mk22-lab-advanced.yaml b/tcp_tests/templates/underlay/mk22-lab-advanced.yaml
index da8b160..8a925d2 100644
--- a/tcp_tests/templates/underlay/mk22-lab-advanced.yaml
+++ b/tcp_tests/templates/underlay/mk22-lab-advanced.yaml
@@ -11,7 +11,7 @@
     env_name: !os_env ENV_NAME, tcpcloud-mk22
 
     address_pools:
-      public-pool01:
+      admin-pool01:
         net: 172.16.10.0/24:24
         params:
           ip_reserved:
@@ -54,12 +54,12 @@
             use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
 
         network_pools:
-          public: public-pool01
+          admin: admin-pool01
           private: private-pool01
 
         l2_network_devices:
-          public:
-            address_pool: public-pool01
+          admin:
+            address_pool: admin-pool01
             dhcp: true
 #            forward:
 #              mode: nat
@@ -109,7 +109,7 @@
                   l2_network_device: private
                   interface_model: *interface_model
                 - label: ens4
-                  l2_network_device: public
+                  l2_network_device: admin
                   interface_model: *interface_model
               network_config:
                 ens3:
@@ -117,7 +117,7 @@
                     - private
                 ens4:
                   networks:
-                    - public
+                    - admin
 
           - name: ctl01.mk22-lab-advanced.local
             role: salt_minion
@@ -147,7 +147,7 @@
                   l2_network_device: private
                   interface_model: *interface_model
                 - label: eth1
-                  l2_network_device: public
+                  l2_network_device: admin
                   interface_model: *interface_model
               network_config: &network_config
                 eth0:
@@ -155,7 +155,7 @@
                     - private
                 eth1:
                   networks:
-                    - public
+                    - admin
 
           - name: ctl02.mk22-lab-advanced.local
             role: salt_minion
diff --git a/tcp_tests/templates/underlay/mk22-lab-basic.yaml b/tcp_tests/templates/underlay/mk22-lab-basic.yaml
index c3cf0f9..f21fc17 100644
--- a/tcp_tests/templates/underlay/mk22-lab-basic.yaml
+++ b/tcp_tests/templates/underlay/mk22-lab-basic.yaml
@@ -11,7 +11,7 @@
     env_name: !os_env ENV_NAME, tcpcloud-mk22
 
     address_pools:
-      public-pool01:
+      admin-pool01:
         net: 172.16.10.0/24:24
         params:
           ip_reserved:
@@ -54,12 +54,12 @@
             use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
 
         network_pools:
-          public: public-pool01
+          admin: admin-pool01
           private: private-pool01
 
         l2_network_devices:
-          public:
-            address_pool: public-pool01
+          admin:
+            address_pool: admin-pool01
             dhcp: true
 #            forward:
 #              mode: nat
@@ -109,7 +109,7 @@
                   l2_network_device: private
                   interface_model: *interface_model
                 - label: ens4
-                  l2_network_device: public
+                  l2_network_device: admin
                   interface_model: *interface_model
               network_config:
                 ens3:
@@ -117,7 +117,7 @@
                     - private
                 ens4:
                   networks:
-                    - public
+                    - admin
 
           - name: ctl01.mk22-lab-basic.local
             role: salt_minion
@@ -147,7 +147,7 @@
                   l2_network_device: private
                   interface_model: *interface_model
                 - label: eth1
-                  l2_network_device: public
+                  l2_network_device: admin
                   interface_model: *interface_model
               network_config: &network_config
                 eth0:
@@ -155,7 +155,7 @@
                     - private
                 eth1:
                   networks:
-                    - public
+                    - admin
 
           - name: ctl02.mk22-lab-basic.local
             role: salt_minion
diff --git a/tcp_tests/templates/underlay/mk22-qa-lab01--meta-data.yaml b/tcp_tests/templates/underlay/mk22-qa-lab01--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/underlay/mk22-qa-lab01--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/underlay/mk22-qa-lab01--user-data-cfg01.yaml b/tcp_tests/templates/underlay/mk22-qa-lab01--user-data-cfg01.yaml
new file mode 100644
index 0000000..31f60a7
--- /dev/null
+++ b/tcp_tests/templates/underlay/mk22-qa-lab01--user-data-cfg01.yaml
@@ -0,0 +1,98 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGwjUlYn9UsmWmAGSuEA2sICad7WqxgsJR0HKcMbbxi0tn96h4Cq2iGYmzlJ48egLm5R5pxyWnFvL4b/2zb+kKTPCMwRc9nv7xEGosEFNQEoSDd+gYu2CO0dgS2bX/7m2DXmzvhqPjxWQUXXsb0OYAS1r9Es65FE8y4rLaegz8V35xfH45bTCA0W8VSKh264XtGz12hacqsttE/UvyjJTZe+/XV+xJy3WAWxe8J/MuW1VqbqNewTmpTE/LJU8i6pG4msU6+wH99UvsGAOKQOduynUHKWG3VZg5YCjpbbV/t/pfW/vHB3b3jiifQmNhulyiG/CNnSQ5BahtV/7qPsYt vagrant@cfg01
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+   - sudo ifup ens4
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   ########################################################
+   # Node is ready, allow SSH access
+   - echo "Allow SSH access ..."
+   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/network/interfaces.d/99-tcp-tests.cfg
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+          auto ens4
+          iface ens4 inet dhcp
+
+   - path: /root/.ssh/id_rsa
+     owner: root:root
+     permissions: '0600'
+     content: |
+         -----BEGIN RSA PRIVATE KEY-----
+         MIIEpAIBAAKCAQEAxsI1JWJ/VLJlpgBkrhANrCAmne1qsYLCUdBynDG28YtLZ/eo
+         eAqtohmJs5SePHoC5uUeacclpxby+G/9s2/pCkzwjMEXPZ7+8RBqLBBTUBKEg3fo
+         GLtgjtHYEtm1/+5tg15s74aj48VkFF17G9DmAEta/RLOuRRPMuKy2noM/Fd+cXx+
+         OW0wgNFvFUioduuF7Rs9doWnKrLbRP1L8oyU2Xvv11fsSct1gFsXvCfzLltVam6j
+         XsE5qUxPyyVPIuqRuJrFOvsB/fVL7BgDikDnbsp1Bylht1WYOWAo6W21f7f6X1v7
+         xwd2944on0JjYbpcohvwjZ0kOQWobVf+6j7GLQIDAQABAoIBAF0tAAMlmLGY7CQU
+         /R3IctBlRhU1DpZmyTfXc1MbzzqO5Wu44yZbQyjBthcUrdWGEUQy1r4Z2OHq1T54
+         KcPry6DDjuU9Q+rkVXmnC07a3GOmOq7zEEA/3zU01ImJvFNdb8NtCb6ELOKDT7Zo
+         WGUi2h/7M41+OqDzD2m4csYO/3Vvr12sMhn9BfwU4OPpL44A4PJiEryEAw9o5/j/
+         73eyPvgf6tkC4l0mMtfHB9tg/F++iH8fiEr1SMvHGIc9gZNmFYMrs2XfLkAejPfH
+         XrOyw6eqd+kluqw51gHhdeQYwBx6mfOkbhPHWU79FzpH5M1ikdfImZmPCxVf3Ykj
+         nxLoK9UCgYEA4c9agPb/OFyN00nnUMBxzQt1pErpOf/7QhnvNZThomzSV7PyefxF
+         H6G/VlS3gCcrWBCh7mqOSxGcNQwgudVqzUm7QXruQeg4nWcCGSxg7lGYSEf0MyWL
+         5wrd+f9MoV/VV8udIPENjp96o5kwQEVRfsTBNwmk54kup2+br5q8re8CgYEA4VT8
+         UeIN+plP6FjZYITI+SO/ou5goKIhfBrqa5gOXXPc2y6sIu9wBWCr+T7FAF/2gGhS
+         rpVx76zcmx05nwkxIlJh58+G3MVyUDFoWnrtL38vdkBSuOGgNfzcBsFpQvFs8WaW
+         otbuTtkPcXbVdYRr32/C620MxXhUO+svo3CLaaMCgYEA1rjlF8NHl+Gy31rkQg5t
+         aIxgFpVBR+zZkNa2d94V3Ozb65fqmALB/D1Dg6VVROB6P+i5AsyCeHHLd0oMCIof
+         YAyfqrlpvHRE+bAM98ESfyxJwVnipYwrh8z2nZYd2UoWxcCRrtRpjtipts2ha0w/
+         HWudS2e5To5NNdxUT9y1VDMCgYEAxkQiE+ZkyGiXv+hVtLCBqX4EA9fdm9msvudr
+         9qn/kcj9vrntanvlxEWQbCoH61GEsu2YOtdyPiKKpc1sQvwyiHGWhgK7NoxhDiC7
+         IknhYxZ064ajgtu8PWS1MRiDhwypACt1Rej6HNSu2vZl0hZnWF2dU8tLHoHHFEXX
+         T+caNCMCgYBZpD6XBiiEXf0ikXYnXKOmbsyVG80V+yqfLo85qb2RW9TaviOSP43g
+         nB22ReMSHq2cOrs6VTTgfhxefBwzdDFbfKMf6ZU82jCNlpetAZOrhdMHUvcsjSQk
+         XKI6Ldfq6TU3xKujRHfGP+oQ6GLwVCL/kjGxOuSRLFGfRiiqYI3nww==
+         -----END RSA PRIVATE KEY-----
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/underlay/mk22-qa-lab01--user-data1404.yaml b/tcp_tests/templates/underlay/mk22-qa-lab01--user-data1404.yaml
new file mode 100644
index 0000000..8df02af
--- /dev/null
+++ b/tcp_tests/templates/underlay/mk22-qa-lab01--user-data1404.yaml
@@ -0,0 +1,98 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGwjUlYn9UsmWmAGSuEA2sICad7WqxgsJR0HKcMbbxi0tn96h4Cq2iGYmzlJ48egLm5R5pxyWnFvL4b/2zb+kKTPCMwRc9nv7xEGosEFNQEoSDd+gYu2CO0dgS2bX/7m2DXmzvhqPjxWQUXXsb0OYAS1r9Es65FE8y4rLaegz8V35xfH45bTCA0W8VSKh264XtGz12hacqsttE/UvyjJTZe+/XV+xJy3WAWxe8J/MuW1VqbqNewTmpTE/LJU8i6pG4msU6+wH99UvsGAOKQOduynUHKWG3VZg5YCjpbbV/t/pfW/vHB3b3jiifQmNhulyiG/CNnSQ5BahtV/7qPsYt vagrant@cfg01
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifup eth0
+   #- sudo route add default gw {gateway} {interface_name}
+   - sudo ifup eth1
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   - echo "Preparing base OS"
+   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+   - echo "deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ trusty main security tcp tcp-salt" > /etc/apt/sources.list
+   - wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -
+   # the saltstack repo provides the same minion version (2016.3.3) on both trusty and xenial
+   - echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/latest trusty main" > /etc/apt/sources.list.d/saltstack.list
+   - wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
+
+   - apt-get clean
+   - apt-get update
+   - apt-get -y upgrade
+
+   # Install common packages
+   - apt-get install -y python-pip git
+   - apt-get install -y curl tmux byobu iputils-ping traceroute htop tree
+
+   - apt-get install -y salt-minion
+
+   # To be configured from inventory/fuel-devops by the operator or autotests
+   - 'echo "id: {hostname}" >> /etc/salt/minion'
+   - 'echo "master: 172.16.10.100" >> /etc/salt/minion'
+
+   - echo "Restarting minion service with workarounds..."
+   - rm -f /etc/salt/pki/minion/minion_master.pub
+   - service salt-minion restart
+   - sleep 5
+   - rm -f /etc/salt/pki/minion/minion_master.pub
+   - service salt-minion restart
+
+   #- echo "Showing node metadata..."
+   #- salt-call pillar.data
+
+   #- echo "Running complete state ..."
+   #- salt-call state.sls linux,openssh,salt
+
+   # Workaround for bug https://mirantis.jira.com/browse/PROD-8214
+   - apt-get -y install --install-recommends linux-generic-lts-xenial
+   - reboot
+
+   ########################################################
+   # Node is ready, allow SSH access
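+   # (left disabled, presumably because the node reboots above for PROD-8214
+   # and the 'cloud-init-per once' DROP rule from bootcmd is not re-applied
+   # after that reboot)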
+   ##- echo "Allow SSH access ..."
+   ##- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/network/interfaces.d/99-tcp-tests.cfg
+     content: |
+          auto eth0
+          iface eth0 inet dhcp
+          auto eth1
+          iface eth1 inet dhcp
diff --git a/tcp_tests/templates/underlay/mk22-qa-lab01--user-data1604.yaml b/tcp_tests/templates/underlay/mk22-qa-lab01--user-data1604.yaml
new file mode 100644
index 0000000..4ec3a67
--- /dev/null
+++ b/tcp_tests/templates/underlay/mk22-qa-lab01--user-data1604.yaml
@@ -0,0 +1,94 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGwjUlYn9UsmWmAGSuEA2sICad7WqxgsJR0HKcMbbxi0tn96h4Cq2iGYmzlJ48egLm5R5pxyWnFvL4b/2zb+kKTPCMwRc9nv7xEGosEFNQEoSDd+gYu2CO0dgS2bX/7m2DXmzvhqPjxWQUXXsb0OYAS1r9Es65FE8y4rLaegz8V35xfH45bTCA0W8VSKh264XtGz12hacqsttE/UvyjJTZe+/XV+xJy3WAWxe8J/MuW1VqbqNewTmpTE/LJU8i6pG4msU6+wH99UvsGAOKQOduynUHKWG3VZg5YCjpbbV/t/pfW/vHB3b3jiifQmNhulyiG/CNnSQ5BahtV/7qPsYt vagrant@cfg01
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+   - sudo ifup ens4
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   - echo "Preparing base OS"
+   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+   - echo "deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ xenial main security tcp tcp-salt" > /etc/apt/sources.list
+   - wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -
+   # the saltstack repo provides the same minion version (2016.3.3) on both trusty and xenial
+   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest xenial main" > /etc/apt/sources.list.d/saltstack.list
+   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
+
+   - apt-get clean
+   - apt-get update
+   - apt-get -y upgrade
+
+   # Install common packages
+   - apt-get install -y python-pip git
+   - apt-get install -y curl tmux byobu iputils-ping traceroute htop tree
+
+   - apt-get install -y salt-minion
+
+   # To be configured from inventory/fuel-devops by the operator or autotests
+   - 'echo "id: {hostname}" >> /etc/salt/minion'
+   - 'echo "master: 172.16.10.100" >> /etc/salt/minion'
+
+   - echo "Restarting minion service with workarounds..."
+   - rm -f /etc/salt/pki/minion/minion_master.pub
+   - service salt-minion restart
+   - sleep 5
+   - rm -f /etc/salt/pki/minion/minion_master.pub
+   - service salt-minion restart
+
+   #- echo "Showing node metadata..."
+   #- salt-call pillar.data
+
+   #- echo "Running complete state ..."
+   #- salt-call state.sls linux,openssh,salt
+
+   ########################################################
+   # Node is ready, allow SSH access
+   - echo "Allow SSH access ..."
+   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/network/interfaces.d/99-tcp-tests.cfg
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+          auto ens4
+          iface ens4 inet dhcp
diff --git a/tcp_tests/templates/underlay/mk22-qa-lab01.yaml b/tcp_tests/templates/underlay/mk22-qa-lab01.yaml
new file mode 100644
index 0000000..d2713d6
--- /dev/null
+++ b/tcp_tests/templates/underlay/mk22-qa-lab01.yaml
@@ -0,0 +1,451 @@
+---
+aliases:
+  dynamic_addresses_pool:
+    - &pool_default !os_env POOL_DEFAULT, 172.16.10.0/24:24
+
+  default_interface_model:
+    - &interface_model !os_env INTERFACE_MODEL, virtio
+
+template:
+  devops_settings:
+    env_name: !os_env ENV_NAME, tcpcloud-mk22
+
+    address_pools:
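+      # Addresses are relative to each pool's network: +N reserves the N-th
+      # address of the subnet and -2 counts from the end (fuel-devops
+      # address-pool syntax).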
+      admin-pool01:
+        net: 172.16.10.0/24:24
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_cfg01.mk22-qa-lab01.local: +100
+            default_ctl01.mk22-qa-lab01.local: +101
+            default_ctl02.mk22-qa-lab01.local: +102
+            default_ctl03.mk22-qa-lab01.local: +103
+            default_prx01.mk22-qa-lab01.local: +121
+            default_cmp01.mk22-qa-lab01.local: +105
+            default_cmp02.mk22-qa-lab01.local: +106
+            default_mon01.mk22-qa-lab01.local: +107
+            default_gtw01.mk22-qa-lab01.local: +131
+            default_dbs01.mk22-qa-lab01.local: +181
+            default_dbs02.mk22-qa-lab01.local: +182
+            default_dbs03.mk22-qa-lab01.local: +183
+            default_vsrx01.mk22-qa-lab01.local: +250
+          ip_ranges:
+            dhcp: [+100, -2]
+
+      private-pool01:
+        net: 192.168.10.0/24:24
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_cfg01.mk22-qa-lab01.local: +100
+            default_ctl01.mk22-qa-lab01.local: +101
+            default_ctl02.mk22-qa-lab01.local: +102
+            default_ctl03.mk22-qa-lab01.local: +103
+            default_prx01.mk22-qa-lab01.local: +121
+            default_cmp01.mk22-qa-lab01.local: +105
+            default_cmp02.mk22-qa-lab01.local: +106
+            default_gtw01.mk22-qa-lab01.local: +131
+            default_dbs01.mk22-qa-lab01.local: +181
+            default_dbs02.mk22-qa-lab01.local: +182
+            default_dbs03.mk22-qa-lab01.local: +183
+            default_vsrx01.mk22-qa-lab01.local: +250
+          ip_ranges:
+            dhcp: [+100, -2]
+
+      public-pool01:
+        net: 10.100.1.0/24:24
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_vsrx01.mk22-qa-lab01.local: +250
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+
+        l2_network_devices:
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+#            forward:
+#              mode: nat
+
+          private:
+            address_pool: private-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          public:
+            address_pool: public-pool01
+            forward:
+              mode: nat
+
+        group_volumes:
+         - name: cloudimage1404    # This name is used for 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1404  # https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img or
+                                             # http://apt.tcpcloud.eu/images/ubuntu-14-04-x64-201608231134.qcow2
+           format: qcow2
+         - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img or
+                                             # http://apt.tcpcloud.eu/images/ubuntu-16-04-x64-201608231004.qcow2
+           format: qcow2
+
+         - name: vsrx_image    # This name is used for 'backing_store' option for node volumes.
+           source_image: !os_env VSRX_PATH
+           format: qcow2
+
+        nodes:
+          - name: cfg01.mk22-qa-lab01.local
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: !include mk22-qa-lab01--meta-data.yaml
+                  cloudinit_user_data: !include mk22-qa-lab01--user-data-cfg01.yaml
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: admin
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - private
+                ens4:
+                  networks:
+                    - admin
+
+          - name: ctl01.mk22-qa-lab01.local
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 15000
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: eth0
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1404
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: !include mk22-qa-lab01--meta-data.yaml
+                  cloudinit_user_data: !include mk22-qa-lab01--user-data1404.yaml
+
+              interfaces: &interfaces
+                - label: eth0
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: eth1
+                  l2_network_device: admin
+                  interface_model: *interface_model
+              network_config: &network_config
+                eth0:
+                  networks:
+                    - private
+                eth1:
+                  networks:
+                    - admin
+
+          - name: ctl02.mk22-qa-lab01.local
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 15000
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: eth0
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1404
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: !include mk22-qa-lab01--meta-data.yaml
+                  cloudinit_user_data: !include mk22-qa-lab01--user-data1404.yaml
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: ctl03.mk22-qa-lab01.local
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 15000
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: eth0
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1404
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: !include mk22-qa-lab01--meta-data.yaml
+                  cloudinit_user_data: !include mk22-qa-lab01--user-data1404.yaml
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: cmp01.mk22-qa-lab01.local
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: eth0
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1404
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: !include mk22-qa-lab01--meta-data.yaml
+                  cloudinit_user_data: !include mk22-qa-lab01--user-data1404.yaml
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: cmp02.mk22-qa-lab01.local
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: eth0
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1404
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: !include mk22-qa-lab01--meta-data.yaml
+                  cloudinit_user_data: !include mk22-qa-lab01--user-data1404.yaml
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: prx01.mk22-qa-lab01.local
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 1024
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: eth0
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1404
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: !include mk22-qa-lab01--meta-data.yaml
+                  cloudinit_user_data: !include mk22-qa-lab01--user-data1404.yaml
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: dbs01.mk22-qa-lab01.local
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: eth0
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1404
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: !include mk22-qa-lab01--meta-data.yaml
+                  cloudinit_user_data: !include mk22-qa-lab01--user-data1404.yaml
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: dbs02.mk22-qa-lab01.local
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: eth0
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1404
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: !include mk22-qa-lab01--meta-data.yaml
+                  cloudinit_user_data: !include mk22-qa-lab01--user-data1404.yaml
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: dbs03.mk22-qa-lab01.local
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: eth0
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1404
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: !include mk22-qa-lab01--meta-data.yaml
+                  cloudinit_user_data: !include mk22-qa-lab01--user-data1404.yaml
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+
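+          # The gateway node gtw01 below is kept commented out in this
+          # template; uncomment the block to include it in the environment.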
+#          - name: gtw01.mk22-qa-lab01.local
+#            role: salt_minion
+#            params:
+#              vcpu: !os_env SLAVE_NODE_CPU, 1
+#              memory: !os_env SLAVE_NODE_MEMORY, 2048
+#              boot:
+#                - hd
+#              cloud_init_volume_name: iso
+#              cloud_init_iface_up: eth0
+#              volumes:
+#                - name: system
+#                  capacity: !os_env NODE_VOLUME_SIZE, 150
+#                  backing_store: cloudimage1404
+#                  format: qcow2
+#                - name: iso  # Volume with name 'iso' will be used
+#                             # to store the image with cloud-init metadata.
+#                  capacity: 1
+#                  format: raw
+#                  device: cdrom
+#                  bus: ide
+#                  cloudinit_meta_data: !include mk22-qa-lab01--meta-data.yaml
+#                  cloudinit_user_data: !include mk22-qa-lab01--user-data1404.yaml
+#              interfaces: *interfaces
+#              network_config: *network_config
+
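+          # NOTE: unlike the salt_minion nodes, vsrx01 boots a prebuilt
+          # vsrx_image with no cloud-init volume and pins static MAC
+          # addresses on eth0/eth1, presumably so the appliance's stored
+          # interface configuration keeps matching the same NICs across
+          # redeploys.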
+          - name: vsrx01.mk22-qa-lab01.local
+            role: vsrx
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: vsrx_image
+                  format: qcow2
+              interfaces:
+                - label: eth0
+                  l2_network_device: public
+                  interface_model: virtio
+                  mac_address: 52:54:00:4e:b4:36
+                - label: eth1
+                  l2_network_device: private
+                  interface_model: virtio
+                  mac_address: 52:54:00:e1:44:9d
+              network_config:
+                eth0:
+                  networks:
+                    - public
+                eth1:
+                  networks:
+                    - private