Add cookied-bm-contrail40-queens

Latest patchset: remove the workaround (WR) for the tenant network
US: PROD-22163
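
Usage note (assumption about the local test workflow, not enforced by this
change): the new templates are selected through the environment variables
that salt.yaml and underlay.yaml read via os_env(), roughly:

    export LAB_CONFIG_NAME=cookied-bm-oc40-queens
    export ENVIRONMENT_MODEL_INVENTORY_NAME=physical-cookied-bm-oc40-queens
    export CLUSTER_CONTEXT_NAME=salt-context-cookiecutter-contrail.yaml
    export REPOSITORY_SUITE=proposed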

Change-Id: Iff6f055b0da984bd8c4f1b8c4aba41be32376624
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/core.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/core.yaml
new file mode 100644
index 0000000..15d5b82
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/core.yaml
@@ -0,0 +1,10 @@
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/lab04-physical-inventory.yaml
new file mode 100644
index 0000000..5aa9ebe
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/lab04-physical-inventory.yaml
@@ -0,0 +1,94 @@
+nodes:
+    cfg01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_static_ctl
+          single_address: 10.167.8.99
+
+    # Physical nodes
+    kvm01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: infra_kvm_node01
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    kvm02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: infra_kvm_node02
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    kvm03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: infra_kvm_node03
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    cmp<<count>>:
+      reclass_storage_name: openstack_compute_rack01
+      roles:
+      - openstack_compute
+      - features_lvm_backend_volume_sdb
+      - linux_system_codename_xenial
+      interfaces:
+        enp2s0f1:
+        enp2s0f1:
+          role: single_dhcp
+        enp5s0f0:
+          role: bond0_ab_contrail
+        enp5s0f1:
+          role: single_vlan_ctl
+
+#    cmp001.cookied-bm-oc40-queens.local:
+#      reclass_storage_name: openstack_compute_node01
+#      roles:
+#      - openstack_compute
+#      - features_lvm_backend_volume_sdb
+#      - linux_system_codename_xenial
+#      interfaces:
+#        enp2s0f1:
+#          role: single_mgm
+#          deploy_address: 172.16.49.73
+#        enp5s0f0:
+#          role: single_contrail_vlan_prv
+#          tenant_address: 192.168.0.101
+#        enp5s0f1:
+#          role: single_vlan_ctl
+#          single_address: 10.167.8.101
+#    cmp002.cookied-bm-oc40-queens.local:
+#      reclass_storage_name: openstack_compute_node02
+#      roles:
+#      - openstack_compute
+#      - features_lvm_backend_volume_sdb
+#      - linux_system_codename_xenial
+#      interfaces:
+#        enp2s0f1:
+#          role: single_mgm
+#          deploy_address: 172.16.49.74
+#        enp5s0f0:
+#          role: single_contrail_vlan_prv
+#          tenant_address: 192.168.0.102
+#        enp5s0f1:
+#          role: single_vlan_ctl
+#          single_address: 10.167.8.102
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml
new file mode 100644
index 0000000..7dff4de
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml
@@ -0,0 +1,287 @@
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+
+{% set PATTERN = os_env('PATTERN', 'false') %}
+{% set RUN_TEMPEST = os_env('RUN_TEMPEST', 'false') %}
+
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+# Install OpenStack control services
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
+
+- description: Workaround to install cinder volume
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:volume' state.sls cinder
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
+
+# Install OpenContrail
+
+- description: Install Docker services
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host' match.pillar 'docker:host' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host' state.sls docker.host
+    fi; sleep 10;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 20}
+  skip_fail: false
+
+- description: Install opencontrail database services on first minion
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 20}
+  skip_fail: false
+
+- description: Install opencontrail database services
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database' state.sls opencontrail.database
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 20}
+  skip_fail: false
+
+- description: Install Opencontrail control services on first minion
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 20}
+  skip_fail: false
+
+- description: Install Opencontrail control services
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 20}
+  skip_fail: false
+
+- description: Install Opencontrail collectors on first minion
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:collector and *01*' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 20}
+  skip_fail: false
+
+- description: Install Opencontrail collectors
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 20}
+  skip_fail: false
+
+- description: Spawn Opencontrail docker images
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control or I@opencontrail:collector' state.sls docker.client && sleep 15;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: Finalize opencontrail services
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 30}
+  skip_fail: false
+
+- description: Finalize opencontrail services
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:client and not I@opencontrail:compute' state.sls opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Finalize opencontrail services
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 30}
+  skip_fail: true
+
+- description: Check contrail status
+  cmd: sleep 15; salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Reboot computes
+  cmd: |
+    salt "cmp*" system.reboot;
+    sleep 600;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
+
+- description: Remove crash files from /var/crashes/ left after vrouter crashes
+  cmd: salt "cmp*" cmd.run "rm -rf /var/crashes/*"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
+
+- description: Apply Opencontrail compute
+  cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 30}
+  skip_fail: false
+
+- description: Apply Opencontrail compute
+  cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Check status for contrail services
+  cmd: |
+    sleep 15;
+    salt -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
+
+- description: Sync time on all nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq;  service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Hack resolv.conf on VCP nodes for internal services access
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Create heat-net before external net create
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create heat-net'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create public network for contrail
+  cmd: |
+    salt 'ntw01*' contrail.virtual_network_create public '{"external":true,"ip_prefix":"192.168.200.0","ip_prefix_len":24,"asn":64512,"target":10000}'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Steps from neutron client for contrail
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create heat-net 10.20.30.0/24 --allocation-pool start=10.20.30.10,end=10.20.30.254 --gateway 10.20.30.1 --name heat-subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Steps from neutron client for contrail
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create heat-router'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Steps from neutron client for contrail
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set heat-router public'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Steps from neutron client for contrail
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add heat-router heat-subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+# Starting prepare runtest
+
+- description: Upload tempest template
+  upload:
+    local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: runtest.yml
+    remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/
+  node_name: {{ HOSTNAME_CFG01 }}
+  skip_fail: False
+
+- description: Include class with tempest template into cfg node
+  cmd: |
+    sed -i 's/classes\:/classes\:\n- cluster.{{ LAB_CONFIG_NAME }}.infra.runtest/g' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml;
+    salt '*' saltutil.refresh_pillar;
+    salt '*' saltutil.sync_all;
+    salt 'ctl01*' pkg.install docker.io;
+    salt 'ctl01*' cmd.run 'iptables --policy FORWARD ACCEPT';
+    salt 'cfg01*' state.sls salt.minion && sleep 20;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Enforce keystone client
+  cmd: |
+    salt 'cfg01*' state.sls keystone.client;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Create flavors for tests
+  cmd: |
+    salt 'cfg01*' state.sls nova.client;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Upload cirros image
+  cmd: |
+    salt 'cfg01*' state.sls glance.client;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Generate tempest config
+  cmd: |
+    salt 'cfg01*' state.sls runtest;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Download cirros image for runtest
+  cmd: |
+    wget http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img -O /tmp/TestCirros-0.3.5.img
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Run tempest from new docker image
+  cmd: |
+    OPENSTACK_VERSION=`salt-call --out=newline_values_only pillar.get _param:openstack_version`;
+    docker run -e ARGS="-r test -w 2" -v /tmp/test/tempest.conf:/etc/tempest/tempest.conf -v /tmp/:/tmp/ -v /tmp/test:/root/tempest -v /etc/ssl/certs/:/etc/ssl/certs/ --rm docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:$OPENSTACK_VERSION /bin/bash -c "run-tempest";
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
+
+- description: Download xml results
+  download:
+    remote_path: /tmp/test/
+    remote_filename: "report_*.xml"
+    local_path: {{ os_env('PWD') }}
+  node_name: {{ HOSTNAME_CTL01 }}
+  skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml b/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml
new file mode 100644
index 0000000..f0d6d8a
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml
@@ -0,0 +1,47 @@
+classes:
+- service.runtest.tempest
+- service.runtest.tempest.public_net
+- service.runtest.tempest.services.manila.glance
+parameters:
+  _param:
+    glance_image_cirros_location: http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img
+    glance_image_fedora_location: http://cz8133.bud.mirantis.net:8099/Fedora-Cloud-Base-27-1.6.x86_64.qcow2
+    glance_image_manila_location: http://cz8133.bud.mirantis.net:8099/manila-service-image-master.qcow2
+    openstack_public_neutron_subnet_allocation_end: 192.168.200.220
+    openstack_public_neutron_subnet_allocation_start: 192.168.200.130
+    openstack_public_neutron_subnet_cidr: 192.168.200.0/24
+    openstack_public_neutron_subnet_gateway: 192.168.200.1
+    runtest_tempest_cfg_dir: /tmp/test
+    runtest_tempest_cfg_name: tempest.conf
+    runtest_tempest_public_net: public
+    tempest_test_target: ctl01*
+  neutron:
+    client:
+      enabled: true
+  runtest:
+    enabled: true
+    keystonerc_node: ctl01*
+    tempest:
+      DEFAULT:
+        log_file: tempest.log
+      cfg_dir: ${_param:runtest_tempest_cfg_dir}
+      cfg_name: ${_param:runtest_tempest_cfg_name}
+      compute:
+        min_compute_nodes: 2
+      convert_to_uuid:
+        network:
+          public_network_id: ${_param:runtest_tempest_public_net}
+      enabled: true
+      heat_plugin:
+        build_timeout: '600'
+      put_keystone_rc_enabled: false
+      put_local_image_file_enabled: false
+      share:
+        capability_snapshot_support: true
+        run_driver_assisted_migration_tests: false
+        run_manage_unmanage_snapshot_tests: false
+        run_manage_unmanage_tests: false
+        run_migration_with_preserve_snapshots_tests: false
+        run_quota_tests: true
+        run_replication_tests: false
+        run_snapshot_tests: true
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-cookiecutter-contrail.yaml
new file mode 100644
index 0000000..bfcd153
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-cookiecutter-contrail.yaml
@@ -0,0 +1,257 @@
+default_context:
+  backup_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
+    +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
+    qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
+    m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
+    7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
+    2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
+    HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
+    AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
+    o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
+    5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
+    XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
+    AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
+    USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
+    uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
+    QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
+    98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
+    r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
+    qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
+    CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
+    p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
+    79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
+    NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
+    CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
+    XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
+    N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
+    -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_control_node01_address: 10.167.8.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_address: 10.167.8.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_address: 10.167.8.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: 10.167.8.90
+  cicd_control_vip_hostname: cid
+  cicd_enabled: 'True'
+  cicd_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
+    oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
+    IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
+    kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
+    wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
+    27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
+    5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
+    lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
+    k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
+    3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
+    dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
+    0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
+    qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
+    BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
+    UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
+    VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
+    1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
+    nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
+    Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
+    FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
+    HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
+    Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
+    poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
+    17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
+    l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
+    -----END RSA PRIVATE KEY-----
+  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
+  cluster_domain: cookied-bm-oc40-queens.local
+  cluster_name: cookied-bm-oc40-queens
+  opencontrail_version: 4.0
+  linux_repo_contrail_component: oc40
+  compute_bond_mode: active-backup
+  compute_padding_with_zeros: 'True'
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.167.8.0/24
+  control_vlan: '2422'
+  cookiecutter_template_branch: ''
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+  deploy_network_gateway: 172.16.49.65
+  deploy_network_netmask: 255.255.255.192
+  deploy_network_subnet: 172.16.49.64/26
+  deployment_type: physical
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.208.44
+  email_address: sgudz@mirantis.com
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.8.241
+  infra_kvm01_deploy_address: 172.16.49.67
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.8.242
+  infra_kvm02_deploy_address: 172.16.49.68
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.8.243
+  infra_kvm03_deploy_address: 172.16.49.69
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.167.8.240
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  internal_proxy_enabled: 'False'
+  kqueen_custom_mail_enabled: 'False'
+  kqueen_enabled: 'False'
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 172.16.49.66
+  maas_deploy_cidr: 172.16.49.64/26
+  maas_deploy_gateway: 172.16.49.65
+  maas_deploy_range_end: 172.16.49.119
+  maas_deploy_range_start: 172.16.49.77
+  maas_deploy_vlan: '0'
+  maas_dhcp_enabled: 'True'
+  maas_fabric_name: fabric-51
+  maas_hostname: cfg01
+  maas_manage_deploy_network: 'True'
+  mcp_common_scripts_branch: ''
+  mcp_version: proposed
+  offline_deployment: 'False'
+  opencontrail_analytics_address: 10.167.8.30
+  opencontrail_analytics_hostname: nal
+  opencontrail_analytics_node01_address: 10.167.8.31
+  opencontrail_analytics_node01_hostname: nal01
+  opencontrail_analytics_node02_address: 10.167.8.32
+  opencontrail_analytics_node02_hostname: nal02
+  opencontrail_analytics_node03_address: 10.167.8.33
+  opencontrail_analytics_node03_hostname: nal03
+  opencontrail_compute_iface_mask: '24'
+  opencontrail_control_address: 10.167.8.20
+  opencontrail_control_hostname: ntw
+  opencontrail_control_node01_address: 10.167.8.21
+  opencontrail_control_node01_hostname: ntw01
+  opencontrail_control_node02_address: 10.167.8.22
+  opencontrail_control_node02_hostname: ntw02
+  opencontrail_control_node03_address: 10.167.8.23
+  opencontrail_control_node03_hostname: ntw03
+  opencontrail_enabled: 'True'
+  opencontrail_router01_address: 10.167.8.220
+  opencontrail_router01_hostname: rtr01
+  openldap_enabled: 'False'
+  openssh_groups: ''
+  openstack_benchmark_node01_address: 10.167.8.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '2'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 10.167.8
+  openstack_compute_rack01_tenant_subnet: 192.168.0
+  openstack_compute_single_address_ranges: 10.167.8.101-10.167.8.102
+  openstack_compute_deploy_address_ranges: 172.16.49.73-172.16.49.74
+  openstack_compute_tenant_address_ranges: 192.168.0.101-192.168.0.102
+  openstack_compute_backend_address_ranges: 192.168.0.101-192.168.0.102
+  openstack_compute_node01_hostname: cmp01
+  openstack_compute_node02_hostname: cmp02
+  openstack_compute_node01_address: 10.167.8.101
+  openstack_compute_node02_address: 10.167.8.102
+  openstack_compute_node01_single_address: 10.167.8.101
+  openstack_compute_node02_single_address: 10.167.8.102
+  openstack_compute_node01_deploy_address: 172.16.49.73
+  openstack_compute_node02_deploy_address: 172.16.49.74
+  openstack_control_address: 10.167.8.10
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.167.8.11
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.167.8.12
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.167.8.13
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.167.8.50
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: 10.167.8.51
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: 10.167.8.52
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: 10.167.8.53
+  openstack_database_node03_hostname: dbs03
+  openstack_enabled: 'True'
+  openstack_message_queue_address: 10.167.8.40
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: 10.167.8.41
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: 10.167.8.42
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: 10.167.8.43
+  openstack_message_queue_node03_hostname: msg03
+  openstack_network_engine: opencontrail
+  openstack_neutron_bgp_vpn: 'False'
+  openstack_neutron_bgp_vpn_driver: bagpipe
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_nova_compute_reserved_host_memory_mb: '900'
+  openstack_proxy_address: 10.167.8.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.167.8.81
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.167.8.82
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 10.167.8.19
+  openstack_version: queens
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  oss_webhook_app_id: '24'
+  oss_webhook_login_id: '13'
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
+  salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
+  salt_master_address: 172.16.49.66
+  salt_master_hostname: cfg01
+  salt_master_management_address: 172.16.49.66
+  shared_reclass_branch: ''
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
+  stacklight_enabled: 'True'
+  stacklight_log_address: 10.167.8.60
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 10.167.8.61
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 10.167.8.62
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 10.167.8.63
+  stacklight_log_node03_hostname: log03
+  stacklight_long_term_storage_type: prometheus
+  stacklight_monitor_address: 10.167.8.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 10.167.8.71
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 10.167.8.72
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 10.167.8.73
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_telemetry_address: 10.167.8.85
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 10.167.8.86
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 10.167.8.87
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 10.167.8.88
+  stacklight_telemetry_node03_hostname: mtr03
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 192.168.0.220
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 192.168.0.0/24
+  tenant_vlan: '2423'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'True'
+  openldap_domain: cookied-bm-oc40-queens.local
+  openldap_enabled: 'True'
+  openldap_organisation: ${_param:cluster_name}
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-environment.yaml
new file mode 100644
index 0000000..90d7a3d
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-environment.yaml
@@ -0,0 +1,271 @@
+nodes:
+    # Virtual Control Plane nodes
+    cid01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: cicd_control_node01
+      roles:
+      - cicd_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    cid02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: cicd_control_node02
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    cid03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: cicd_control_node03
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ctl01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - openstack_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ctl02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ctl03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    dbs01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_database_node01
+      roles:
+      - openstack_database_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    dbs02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_database_node02
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    dbs03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_database_node03
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    msg01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_message_queue_node01
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    msg02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_message_queue_node02
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    msg03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_message_queue_node03
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    prx01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    prx02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_proxy_node02
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mon01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: stacklight_server_node01
+      roles:
+      - stacklightv2_server_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mon02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mon03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    nal01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: opencontrail_analytics_node01
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    nal02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: opencontrail_analytics_node02
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    nal03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: opencontrail_analytics_node03
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ntw01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: opencontrail_control_node01
+      roles:
+      - opencontrail_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ntw02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: opencontrail_control_node02
+      roles:
+      - opencontrail_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ntw03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: opencontrail_control_node03
+      roles:
+      - opencontrail_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mtr01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: stacklight_telemetry_node01
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mtr02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: stacklight_telemetry_node02
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mtr03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: stacklight_telemetry_node03
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    log01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: stacklight_log_node01
+      roles:
+      - stacklight_log_leader_v2
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    log02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: stacklight_log_node02
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    log03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: stacklight_log_node03
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+#    bmk01.cookied-bm-oc40-queens.local:
+#      reclass_storage_name: openstack_benchmark_node01
+#      roles:
+#      - openstack_benchmark
+#      - linux_system_codename_xenial
+#      interfaces:
+#        ens3:
+#          role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/salt.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/salt.yaml
new file mode 100644
index 0000000..3853acd
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/salt.yaml
@@ -0,0 +1,152 @@
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-oc40-queens') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = os_env('CLUSTER_CONTEXT_NAME', 'salt-context-cookiecutter-contrail.yaml') %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+- description: "Change path to internal storage for salt.control images"
+  cmd: |
+    set -e;
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+    reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Temporary workaround to set the correct bridge name according to the environment templates
+  cmd: |
+    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+- description: Update minion information
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Rerun openssh after env model is generated
+  cmd: |
+    salt-call state.sls openssh
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.network.host
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+########################################
+# Spin up Control Plane VMs on KVM nodes
+########################################
+
+- description: Execute 'libvirt' states to create necessary libvirt networks
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Create VMs for control plane
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
+- description: '*Workaround* to wait until the control-plane VMs appear in salt-key (instead of a sleep)'
+  cmd: |
+    salt-key -l acc| sort > /tmp/current_keys.txt &&
+    salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 20, delay: 30}
+  skip_fail: false
+
+#########################################
+# Configure all running salt minion nodes
+#########################################
+
+- description: Hack resolv.conf on VCP nodes for internal services access
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Refresh pillars on all minions
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all salt resources
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Show reclass-salt --top for generated nodes
+  cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: "Lab04 workaround: Give each node root acces with key from cfg01"
+  cmd: |
+    set -e;
+    set -x;
+    key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
+    salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
+    salt '*' cmd.run "service sshd restart"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/sl.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/sl.yaml
new file mode 100644
index 0000000..0de2e76
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/sl.yaml
@@ -0,0 +1,16 @@
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-sl.yaml' as SHARED_SL with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
+
+{{  SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
+{{  SHARED_SL.MACRO_INSTALL_GLUSTERFS_CLIENT() }}
+{{  SHARED_SL.MACRO_INSTALL_MONGODB() }}
+{{  SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
+{{  SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
+{{  SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
+{{  SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
+{{  SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
+{{  SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
+{{  SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{  SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--meta-data.yaml
new file mode 100644
index 0000000..a594a53
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..6c9e48f
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml
@@ -0,0 +1,102 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   #   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifdown ens3
+   - sudo ip r d default || true  # remove existing default route to get it from dhcp
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   #   - echo "Preparing base OS"
+
+   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+   # Configure Ubuntu mirrors
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   #   - apt-get clean
+   #   - apt-get update
+
+   # Install common packages
+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   # Install salt-minion and stop it until it is configured
+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+   ########################################################
+   # Node is ready, allow SSH access
+   #   - echo "Allow SSH access ..."
+   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml
new file mode 100644
index 0000000..106c3d5
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml
@@ -0,0 +1,99 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   #   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup {interface_name}
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   #   - echo "Preparing base OS"
+   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+   # Configure Ubuntu mirrors
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   #   - apt-get clean
+   #   - eatmydata apt-get update && apt-get -y upgrade
+
+   # Install common packages
+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   # Install salt-minion and stop it until it is configured
+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+   # Install latest kernel
+   #   - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
+
+   ########################################################
+   # Node is ready, allow SSH access
+   #- echo "Allow SSH access ..."
+   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   #   - reboot
+   ########################################################
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          # The loopback network interface
+          auto lo
+          iface lo inet loopback
+          auto {interface_name}
+          iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml
new file mode 100644
index 0000000..915981e
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml
@@ -0,0 +1,95 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup {interface_name}
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   #   - echo "Preparing base OS"
+   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+   # Configure Ubuntu mirrors
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   #   - apt-get clean
+   #   - eatmydata apt-get update && apt-get -y upgrade
+
+   # Install common packages
+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   # Install salt-minion and stop it until it is configured
+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+   ########################################################
+   # Node is ready, allow SSH access
+   #   - echo "Allow SSH access ..."
+   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          # The loopback network interface
+          auto lo
+          iface lo inet loopback
+          auto {interface_name}
+          iface {interface_name} inet dhcp

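Note: the {% for key in config.underlay.ssh_keys %} loop above is expanded by the test framework before the user-data reaches cloud-init. With a single generated keypair the users section would render roughly as follows (key material shortened, for illustration only):

  users:
   - name: root
     sudo: ALL=(ALL) NOPASSWD:ALL
     shell: /bin/bash
     ssh_authorized_keys:
      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQAB...
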
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay.yaml
new file mode 100644
index 0000000..e84e22d
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay.yaml
@@ -0,0 +1,574 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
+
+#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-oc40-queens') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
+#{% set HOSTNAME_CMP003 = os_env('HOSTNAME_CMP003', 'cmp003.' + DOMAIN_NAME) %}
+#{% set HOSTNAME_KVM04 = os_env('HOSTNAME_KVM04', 'kvm04.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
+{% set ETH2_IP_ADDRESS_CFG01 = os_env('ETH2_IP_ADDRESS_CFG01', '10.167.8.99') %}
+{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
+{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.68') %}
+{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.69') %}
+{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.73') %}
+{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
+#{% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.49.121') %}
+#{% set ETH0_IP_ADDRESS_KVM04 = os_env('ETH0_IP_ADDRESS_KVM04', '172.16.49.122') %}
+# {% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
+
+{% import 'cookied-bm-oc40-queens/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-bm-oc40-queens/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
+{% import 'cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_HWE with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
+ - &cloudinit_user_data_hwe {{ CLOUDINIT_USER_DATA_HWE }}
+
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'cookied-bm-oc4_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
+        params:
+          ip_reserved:
+            gateway: +62
+            l2_network_device: +61
+            default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+            default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+            default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+            default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+            default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+            default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+            #default_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
+            #default_{{ HOSTNAME_KVM04 }}: {{ ETH0_IP_ADDRESS_KVM04 }}
+            #default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+            virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+            virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+            virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+            virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+            virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+            #virtual_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
+            #virtual_{{ HOSTNAME_KVM04 }}: {{ ETH0_IP_ADDRESS_KVM04 }}
+            # virtual_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+          #ip_ranges:
+          #    dhcp: [+2, -4]
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
+        params:
+          ip_reserved:
+            virtual_{{ HOSTNAME_CFG01 }}: {{ ETH2_IP_ADDRESS_CFG01 }}
+            gateway: +1
+            l2_network_device: +1
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '192.168.5.0/24:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '192.168.200.0/24:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: -2
+
+    groups:
+
+      - name: virtual
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+        network_pools:
+          admin: admin-pool01
+
+        l2_network_devices:
+          # Ironic management interface
+          admin:
+            address_pool: admin-pool01
+            dhcp: false
+            parent_iface:
+              phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
+          private:
+            parent_iface:
+              phys_dev: !os_env CONTROL_IFACE
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+      - name: default
+        driver:
+          name: devops_driver_ironic
+          params:
+            os_auth_token: fake-token
+            ironic_url: !os_env IRONIC_URL  # URL that will be used by fuel-devops
+                                            # to access Ironic API
+            # Agent URL that is accessible from the deploying node when nodes
+            # are bootstrapped with PXE. Usually the PXE/provision network address is used.
+            agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
+            agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
+
+        network_pools:
+          admin: admin-pool01
+
+        nodes:
+
+        #  - name: {{ HOSTNAME_CFG01 }}
+        #    role: salt_master
+        #    params:
+        #      ipmi_user: !os_env IPMI_USER
+        #      ipmi_password: !os_env IPMI_PASSWORD
+        #      ipmi_previlegies: OPERATOR
+        #      ipmi_host: !os_env IPMI_HOST_CFG01  # hostname or IP address
+        #      ipmi_lan_interface: lanplus
+        #      ipmi_port: 623
+
+        #      root_volume_name: system     # see 'volumes' below
+        #      cloud_init_volume_name: iso  # see 'volumes' below
+        #      cloud_init_iface_up: enp3s0f1  # see 'interfaces' below.
+        #      volumes:
+        #        - name: system
+        #          capacity: !os_env NODE_VOLUME_SIZE, 200
+
+        #          # The same as for agent URL, here is an URL to the image that should be
+        #          # used for deploy the node. It should also be accessible from deploying
+        #          # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
+        #          source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+        #          source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+        #        - name: iso  # Volume with name 'iso' will be used
+        #                     # for store image with cloud-init metadata.
+
+        #          cloudinit_meta_data: *cloudinit_meta_data
+        #          cloudinit_user_data: *cloudinit_user_data_cfg01
+
+        #      interfaces:
+        #        - label: enp3s0f0  # Infra interface
+        #          mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
+        #        - label: enp3s0f1
+        #          l2_network_device: admin
+        #          mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+
+        #      network_config:
+        #        enp3s0f0:
+        #          networks:
+        #           - infra
+        #        enp3s0f1:
+        #          networks:
+        #           - admin
+
+          - name: {{ HOSTNAME_KVM01 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM01  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # The same as for the agent URLs: here is a URL to the image that should be
+                  # used to deploy the node. It should also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
+
+              network_config:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+          - name: {{ HOSTNAME_KVM02 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM02  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # The same as for the agent URLs: here is a URL to the image that should be
+                  # used to deploy the node. It should also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
+
+              network_config:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+          - name: {{ HOSTNAME_KVM03 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM03  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              # cloud_init_iface_up: eno1  # see 'interfaces' below.
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # The same as for the agent URLs: here is a URL to the image that should be
+                  # used to deploy the node. It should also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                # - label: eno1
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
+                # - label: eno2
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
+
+              network_config:
+                # eno1:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+        #          - name: {{ HOSTNAME_KVM04 }}
+        #            role: salt_minion
+        #            params:
+        #              ipmi_user: !os_env IPMI_USER
+        #              ipmi_password: !os_env IPMI_PASSWORD
+        #              ipmi_previlegies: OPERATOR
+        #              ipmi_host: !os_env IPMI_HOST_KVM04  # hostname or IP address
+        #              ipmi_lan_interface: lanplus
+        #              ipmi_port: 623
+        #
+        #              root_volume_name: system     # see 'volumes' below
+        #              cloud_init_volume_name: iso  # see 'volumes' below
+        #              # cloud_init_iface_up: eno1  # see 'interfaces' below.
+        #              cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
+        #              volumes:
+        #                - name: system
+        #                  capacity: !os_env NODE_VOLUME_SIZE, 200
+        #
+        #                  # The same as for agent URL, here is an URL to the image that should be
+        #                  # used for deploy the node. It should also be accessible from deploying
+        #                  # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
+        #                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+        #                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+        #
+        #                - name: iso  # Volume with name 'iso' will be used
+        #                             # for store image with cloud-init metadata.
+        #
+        #                  cloudinit_meta_data: *cloudinit_meta_data
+        #                  cloudinit_user_data: *cloudinit_user_data
+        #
+        #              interfaces:
+        #                # - label: eno1
+        #                - label: enp2s0f0
+        #                  l2_network_device: admin
+        #                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM04
+        #                # - label: eno2
+        #                - label: enp2s0f1
+        #                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM04
+        #
+        #              network_config:
+        #                # eno1:
+        #                enp2s0f0:
+        #                  networks:
+        #                   - admin
+        #                bond0:
+        #                  networks:
+        #                   - control
+        #                  aggregation: active-backup
+        #                  parents:
+        #                   - enp2s0f1
+        #
+          - name: {{ HOSTNAME_CMP001 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_CMP001  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              # cloud_init_iface_up: enp3s0f0  # see 'interfaces' below.
+              cloud_init_iface_up: enp2s0f1  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # The same as for the agent URLs: here is a URL to the image that should be
+                  # used to deploy the node. It should also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_hwe
+
+              interfaces:
+                - label: enp2s0f0
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
+                - label: enp2s0f1
+                  l2_network_device: admin
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
+                - label: enp5s0f0
+                  mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
+                  features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+                - label: enp5s0f1
+                  mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
+                  features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+                # - label: enp5s0f2
+                #   mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
+                #   features: ['dpdk', 'dpdk_pci: 0000:05:00.2']
+
+              network_config:
+                enp2s0f1:
+                  networks:
+                   - admin
+
+          - name: {{ HOSTNAME_CMP002 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_CMP002  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              # cloud_init_iface_up: eno1  # see 'interfaces' below.
+              cloud_init_iface_up: enp2s0f1  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # The same as for the agent URLs: here is a URL to the image that should be
+                  # used to deploy the node. It should also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_hwe
+
+              interfaces:
+                # - label: eno1
+                - label: enp2s0f0
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
+                # - label: eth0
+                - label: enp2s0f1
+                  l2_network_device: admin
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
+                # - label: eth3
+                - label: enp5s0f0
+                  mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
+                  features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+                # - label: eth2
+                - label: enp5s0f1
+                  mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
+                  features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+                # - label: eth4
+                #   mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
+                #   features: ['dpdk', 'dpdk_pci: 0000:0b:00.0']
+
+              network_config:
+                enp2s0f1:
+                  networks:
+                   - admin
+
+        #          - name: {{ HOSTNAME_CMP003 }}
+        #            role: salt_minion
+        #            params:
+        #              ipmi_user: !os_env IPMI_USER
+        #              ipmi_password: !os_env IPMI_PASSWORD
+        #              ipmi_previlegies: OPERATOR
+        #              ipmi_host: !os_env IPMI_HOST_CMP003  # hostname or IP address
+        #              ipmi_lan_interface: lanplus
+        #              ipmi_port: 623
+        #
+        #              root_volume_name: system     # see 'volumes' below
+        #              cloud_init_volume_name: iso  # see 'volumes' below
+        #              # cloud_init_iface_up: eno1  # see 'interfaces' below.
+        #              cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
+        #              volumes:
+        #                - name: system
+        #                  capacity: !os_env NODE_VOLUME_SIZE, 200
+        #
+        #                  # The same as for agent URL, here is an URL to the image that should be
+        #                  # used for deploy the node. It should also be accessible from deploying
+        #                  # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
+        #                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+        #                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+        #
+        #                - name: iso  # Volume with name 'iso' will be used
+        #                             # for store image with cloud-init metadata.
+        #
+        #                  cloudinit_meta_data: *cloudinit_meta_data
+        #                  cloudinit_user_data: *cloudinit_user_data_hwe
+        #
+        #              interfaces:
+        #                # - label: eno1
+        #                - label: enp2s0f1
+        #                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP003
+        #                # - label: eth0
+        #                - label: enp2s0f0
+        #                  l2_network_device: admin
+        #                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP003
+        #
+        #              network_config:
+        #                enp2s0f0:
+        #                  networks:
+        #                   - admin
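
Note on the address pools above: the relative values under ip_reserved (+62, +61, +1, -2) are, if the usual fuel-devops convention applies, offsets from the start of the pool network (negative values count back from the end of the range). Keeping the default ADMIN_ADDRESS_POOL01 of 172.16.49.64/26, the reserved entries would resolve roughly to the following (illustrative sketch only, not part of the template):

    address_pools:
      admin-pool01:
        net: 172.16.49.64/26
        params:
          ip_reserved:
            gateway: 172.16.49.126             # +62 from 172.16.49.64
            l2_network_device: 172.16.49.125   # +61 from 172.16.49.64
            default_cfg01.cookied-bm-oc40-queens.local: 172.16.49.66
            default_kvm01.cookied-bm-oc40-queens.local: 172.16.49.67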