Add bm cookied template for k8s + contrail

PROD-20364
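
Add deployment templates for a baremetal MCP Pike Kubernetes +
OpenContrail lab: common services (keepalived, RabbitMQ, haproxy),
Kubernetes with the OpenContrail control plane and vrouter computes,
cookiecutter/environment contexts for the salt model, StackLight
(sl.yaml), and cloud-init underlay data for cfg01.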

Change-Id: I87d31b4dfde0da017525705bb32c1cec1bd0fe3d
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/common-services.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/common-services.yaml
new file mode 100644
index 0000000..99b3aa7
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/common-services.yaml
@@ -0,0 +1,76 @@
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
+
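+# Each step below runs its cmd on node_name, is retried according to 'retry', and is allowed to fail when skip_fail is true.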
+# Install support services
+- description: Create and distribute SSL certificates for services using salt state
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls salt
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Install docker
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@docker:host' state.sls docker.host
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check docker
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@docker:host' cmd.run 'docker ps'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install keepalived on primary controller
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster and *01*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install RabbitMQ
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Check RabbitMQ
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' cmd.run "rabbitmqctl cluster_status"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install haproxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' state.sls haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check haproxy service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.status haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Restart rsyslog
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.restart rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml
new file mode 100644
index 0000000..45ad04f
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml
@@ -0,0 +1,264 @@
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+- description: Install etcd
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@etcd:server' state.sls etcd.server.service
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check the etcd health
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@etcd:server' cmd.run '. /var/lib/etcd/configenv && etcdctl cluster-health'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Kubernetes Addons
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Install Kubernetes components
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@kubernetes:pool' state.sls kubernetes.pool
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 60}
+  skip_fail: false
+
+# Opencontrail Control Plane
+
+- description: Create configuration files for OpenContrail
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Opencontrail db on the first database node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 20}
+  skip_fail: false
+
+- description: Configure OpenContrail as an add-on for Kubernetes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Verify the status of the OpenContrail service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Set up the OpenContrail resources
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# OpenContrail vrouters
+- description: Refresh pillars on cmp*
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'cmp*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all on contrail computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' saltutil.sync_all
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Apply highstate on contrail computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' state.highstate exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# - description: Reboot contrail computes
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:compute' cmd.run 'reboot'
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: false
+
+- description: Reboot contrail computes
+  cmd: salt --timeout=600 -C 'I@opencontrail:compute' system.reboot
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
+
+- description: Apply opencontrail.client on contrail computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' state.sls opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Run Kubernetes master without setup
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: true
+
+- description: Run Kubernetes master setup
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@kubernetes:master' --subset 1 state.sls kubernetes.master.setup
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Restart Kubelet
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@kubernetes:pool' service.restart 'kubelet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Check that all nodes are registered
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@kubernetes:pool' cmd.run 'sleep 60; kubectl get nodes'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Renew hosts file on the whole cluster
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '*' state.sls linux.network.host;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# - description: Install Opencontrail db on all nodes
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:database' state.sls opencontrail.database
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 2, delay: 20}
+#   skip_fail: false
+
+# - description: Install Opencontrail control on ctl01
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: false
+
+# - description: Install Opencontrail control on all nodes
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: false
+
+# - description: Install Opencontrail on collector
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: false
+
+# # OpenContrail vrouters
+# - description: Install Opencontrail client
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: false
+
+# - description: Install Opencontrail on computes
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 2, delay: 5}
+#   skip_fail: false
+
+# - description: Wake up vhost0
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@nova:compute' cmd.run 'exec 0>&-; exec 1>&-; exec 2>&-;
+#     nohup bash -c "ip link | grep vhost && echo no_reboot || sleep 5 && reboot & "'
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: false
+
+# - description: Install Opencontrail client on computes
+#   cmd: sleep 300 && salt --timeout=60 --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:compute' state.sls 'opencontrail.client'
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: false
+
+# - description: Install Opencontrail on computes #2
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:compute' state.sls opencontrail
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 2, delay: 5}
+#   skip_fail: false
+
+# # Kubernetes
+# - description: Install Kubernetes Addons
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: true
+
+# - description: Check contrail status
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:database' cmd.run contrail-status
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: false
+
+# - description: Install Kubernetes components
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@kubernetes:pool' state.sls kubernetes.pool
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 5, delay: 60}
+#   skip_fail: false
+
+# # NOTE(vryzhenkin): There is nothing to setup at this model
+# #- description: Setup etcd server on primary controller
+# #  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+# #     -C 'I@kubernetes:master and *01*' state.sls etcd.server.setup
+# #  node_name: {{ HOSTNAME_CFG01 }}
+# #  retry: {count: 1, delay: 5}
+# #  skip_fail: false
+
+# - description: Run Kubernetes master without setup
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#      -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 3, delay: 5}
+#   skip_fail: true
+
+# - description: Run Kubernetes master setup
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#      -C 'I@kubernetes:master and *01*' state.sls kubernetes.master.setup
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: true
+
+# - description: Restart Kubelet
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@kubernetes:pool' service.restart 'kubelet'
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: true
+
+# - description: Renew hosts file on a whole cluster
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '*' state.sls linux.network.host;
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml
new file mode 100644
index 0000000..eb9d76f
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml
@@ -0,0 +1,93 @@
+nodes:
+    cfg01.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+    # Physical nodes
+
+    kvm01.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: infra_kvm_node01
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: single_ctl
+
+    kvm02.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: infra_kvm_node02
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: single_ctl
+
+    kvm03.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: infra_kvm_node03
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: single_ctl
+
+    # prx01.bm-mcp-pike-k8s-contrail.local:
+    #   reclass_storage_name: kubernetes_proxy_node01
+    #   roles:
+    #   - kubernetes_proxy
+    #   # - infra_proxy
+    #   # - stacklight_proxy
+    #   - salt_master_host
+    #   - linux_system_codename_xenial
+    #   interfaces:
+    #     enp9s0f0:
+    #       role: single_mgm
+    #       deploy_address: 172.17.41.8
+    #     enp9s0f1:
+    #       role: single_ctl
+    #       single_address: 10.167.8.81
+
+    cmp001.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: kubernetes_compute_node001
+      roles:
+      - linux_system_codename_xenial
+      - kubernetes_compute_contrail
+      - salt_master_host
+      #- features_lvm_backend
+      interfaces:
+        enp9s0f0:
+          role: single_dhcp
+        ens11f0:
+          role: bond0_ab_contrail
+          tenant_address: 192.168.0.101
+        ens11f1:
+          role: single_ctl
+          single_address: 10.167.8.101
+
+    # cmp002.bm-mcp-pike-k8s-contrail.local:
+    #   reclass_storage_name: kubernetes_compute_node02
+    #   roles:
+    #   - features_lvm_backend
+    #   - linux_system_codename_xenial
+    #   - kubernetes_compute_contrail
+    #   interfaces:
+    #     enp2s0f1:
+    #       role: single_mgm
+    #       deploy_address: 172.16.49.74
+    #     enp5s0f0:
+    #       role: bond0_ab_contrail
+    #       tenant_address: 192.168.0.102
+    #     enp5s0f1:
+    #       role: single_vlan_ctl
+    #       single_address: 10.167.8.102
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
new file mode 100644
index 0000000..b1a3be5
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
@@ -0,0 +1,200 @@
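+# Cookiecutter context consumed by SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL (see salt.yaml) to render the cluster model.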
+default_context:
+  backup_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEowIBAAKCAQEAskZyhTySYWvGrp+dlv+q2La8oiM8Sv1JfQog8+UW28hGUbCq
+    PnWa7bzVqENEY+g+qbQYh2Pvb2xURyY9+02TWLmCYzq7+orO1eG2SDt384YzDc6B
+    nQohUbGwaSH2YsT/QA8KV1fvUW9P0gVEHmJWR8Jo3vdRyw+tEUszYkiTYkcVc9zt
+    O5eYT9a6gSjLhl+tmJXu38jdWTcE8zJP+OGpJRDJpV2EAYwv+LXde9REv4KCGMqN
+    lvli9IA+zarfMsAzSTPpL5ooslB20akKM1h5li3LG47OpKMG9sMVFGEEo7+lqTGa
+    zUJEUPbJU95PqLfxoIOjYF/MhWKU5VALLraVwwIDAQABAoIBAHUAj0CqlFej6G3L
+    DB6CBZrds8el4HhG6+hIsX/gprPwKVaL3+/GN7w35vHb1BLN5fJy5HZXPFmge1+G
+    1b8NFttwRQbjEWRJnZ352Sxm+z60oOU61w4+C8gWGnWleJMyP2PHez3/1G1Z5MUt
+    95sJZx8JlNJg9ymSTD/BXyPuBezFKf8jUSgPbhBv8B2yy82YGzqc9u7sK6BN90P1
+    3ZcynQ4cfieZLoim56dF9YEixr8plGmGpOspPZFlVCGIc1y2BC4ZUyDatcCa7/gQ
+    3aDdt9lkEfoCHezAFOmaZDCOZ70spkwCqXYk42BXpDjKF6djCXyx3WKVF+IhEOYT
+    /S1I8KECgYEA1tzUZgW43/Z7Sm+QnoK3R9hG2oZZOoiTDdHyC+f5dk6maNbJbivM
+    FAPIpzHtCyvBEiSgGmlqpUrwR2NbYnOHjOX72Yq7/e0Vl1XWmplKCsTDNFGXx5Fs
+    9AQbWjOF+drgfZ5p3dNyE9689nJZg5EhTxL8dfwnZat/l+/OKFO2tM0CgYEA1GhW
+    4FMsXe3/g7M0dj5FnbS8xjz93pei5YZanID9mY/RUcKbegdtfvtwsfkZe5trbI39
+    jv96QyJeAnf48UDFwCV6SSZF/8Ez0pYHZanwkwKsli5uZdvSz7rUyVwE6tyO24WA
+    Trgpmbb8uxhJHBNuD+bC/iGd1H0IUuJ65ChD9M8CgYEAxfp2z4boQZ2ZHw5LoHLr
+    tIyJROPUJgYgEfToZaYbC7MOzL1Un2pFwg92fPCY7dkkLraGu690r9esLOtVEhNH
+    zEFB3cJi1Gf3pBlZA9zJB8Ej6Pphs2bBkNqT5XpiMcZHYhhsjhQ+Iibz0NWuu3cn
+    zPe+nmx4VMtAZ1x0hl4UlOUCgYBh8NaWS2O60AIwrRI0r5QUGwGsevUojU0Mfr4L
+    SiMlir4e8BgW1ZP0qGYXel/4sfp/rJ1NMZojmJY2lzFPtOk6J03SqvY97s1BffJd
+    O1X1w5bnDPPUvd7f8CsryeVuklLBADbglWSBP3IbpyAW9RKb/HDPE5seHqUW6t11
+    lOd42wKBgBW0tTV6+aqao6o4ZBU0SVoNg9/PwgOubAzeSDW2+ArXn1sMmroSfXVw
+    fbUTJI5TF/1pd7A5AKoR1FfTqlssoYlMGEPI6HJ4n9/4SqLpwd99HFW0ISt+EUMh
+    Tqt9mDfKzwHxG2QTuOwyrslO8iTwRoN5OYgm4dsL471Obr4DILTz
+    -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyRnKFPJJha8aun52W/6rYtryiIzxK/Ul9CiDz5RbbyEZRsKo+dZrtvNWoQ0Rj6D6ptBiHY+9vbFRHJj37TZNYuYJjOrv6is7V4bZIO3fzhjMNzoGdCiFRsbBpIfZixP9ADwpXV+9Rb0/SBUQeYlZHwmje91HLD60RSzNiSJNiRxVz3O07l5hP1rqBKMuGX62Yle7fyN1ZNwTzMk/44aklEMmlXYQBjC/4td171ES/goIYyo2W+WL0gD7Nqt8ywDNJM+kvmiiyUHbRqQozWHmWLcsbjs6kowb2wxUUYQSjv6WpMZrNQkRQ9slT3k+ot/Ggg6NgX8yFYpTlUAsutpXD
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_control_node01_address: 10.167.8.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_address: 10.167.8.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_address: 10.167.8.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: 10.167.8.90
+  cicd_control_vip_hostname: cid
+  cicd_enabled: 'True'
+  cicd_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpgIBAAKCAQEAxLQy4F7sNBloj0fFvklCq9+IX/BV5OBs6OtSBf6A+ztTs68i
+    ib5W6Tz/knh7wt2URB6uKJJBRBK+Oqj91ZNJxISewP2f5gX10WpjOAti+Fghkemt
+    kvyA8aUxX1wuAz7Y2v1uv1IkfWa5ubL8oJXNjeT9yeCNJWBxvd46XT9UiCs5CnDL
+    lBjRg+AP2+u5OabUFtH7GSzVqcMzhi0qLilP+cRhKmar2tQXFEI5wnwADh0REAF/
+    OxUZPaPEPD9TW7fGxjfrMtyUKqTEbi+EPsIvldkR0IhYrKXjwcFFu3FKISuy8PVM
+    EKUM5aZaLMI/WiMs1zmx+bAOrkCsUAf+sVmocQIDAQABAoIBAQCRnSAojrxmmQSZ
+    RLVy9wK+/zwSYXUEeMrG5iUTQOM0mCePVa/IrjnicYB0anKbv7IZd2jPqe1cuk9O
+    V3mJGH68Vry6+0XaX0EpJIuMmolKdNttC8Ktk/TUbciN4kxBpM2d14ybXvCaUGhe
+    usxfCGZhi0oAnxV9vNaWiqNEEjS+k4u9XTnj3+GxstEwch+l7xJwz83WEsx7G1Zz
+    3Yxg7mh2uRPVCOZGVdClciym+9WHHrcdYw/OJCsSFsT7+qgzptsvXBVxa6EuGaVY
+    Pba+UfOnYIKlBtka4i3zXGaCQF6t2FHw5WyUEmYm3iBYmrGBbEf+3665Kh4NQs0a
+    PV4eHlLdAoGBAO8nDgkTA4gi1gyFy2YBUFP2BignkKCZGHkD8qvBnOt1Rxm6USlZ
+    7GzAtU3nSd8ODzgOBI7+zd82yRqv2hEwP7xARhr0Nx1XvyaQtRlQ6tQnBgvqLDCG
+    n0qvWoBM+Yl6sTRGYavAMCaR7PuULUcZFNWk7m0fv4vqddGijgRsje37AoGBANKP
+    nN72BujsQIhdzAYS+u5/Hxu56Tvgupe6kWkhQuV8MQcM+79I6cgJxxH6zQDP/hGt
+    3vXapgWUgi025LuEUWfkxAtTUfT4cRP2x529CH/XLQMYVqWxkoben9r+eFav+Kgw
+    C0dR3vSOlEMzYoIF+p/km0mIV1ZKZvrWymtXSdODAoGBAL4feUwDfqpKr4pzD1l/
+    r+Gf1BM2KQdTzp3eYpzjJiIWMTkl4wIRyCBJL5nIRvT6E2VH153qubY7srLxnFZP
+    2kuJeXJSNkKwkHlTT3XZ22Zfw7HTL+BAFgDk2PjouPTvwlLBpUJKXr07A4CZs0kz
+    ilmybg340GmmMpY/OdIQjuDjAoGBAMcd5hP2hqxHlfMe+CwaUM+uySU4FvZ32xxW
+    4uGPAarwWZC4V20Zr3JqxKUdDjYhersPOFQ4c129hajqSz2EsFLWRAoNvegx9QUT
+    Dsv9EgeK3Vca8f14wf7mkjbPA8++UyABZvkH1BZiqpQuCI66xrnjvnG4DBde/qlg
+    60S84+SvAoGBAKH1feNtJaNhDxF0OqRuVmSFyL3pkMDoYr/mgpT4T1ToRBW5AtEt
+    Io4egi68ph8IChAt/TGFvikW7tbEgK9ACD/RAfl+LiuhxqJJFtC1LfGfHI7ntuRj
+    DjQrUy59ULoflh3iWBPtpw2ooRlSrAwaIgGt9odMECXp3BK8WLsUG9H1
+    -----END RSA PRIVATE KEY-----
+  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEtDLgXuw0GWiPR8W+SUKr34hf8FXk4Gzo61IF/oD7O1OzryKJvlbpPP+SeHvC3ZREHq4okkFEEr46qP3Vk0nEhJ7A/Z/mBfXRamM4C2L4WCGR6a2S/IDxpTFfXC4DPtja/W6/UiR9Zrm5svyglc2N5P3J4I0lYHG93jpdP1SIKzkKcMuUGNGD4A/b67k5ptQW0fsZLNWpwzOGLSouKU/5xGEqZqva1BcUQjnCfAAOHREQAX87FRk9o8Q8P1Nbt8bGN+sy3JQqpMRuL4Q+wi+V2RHQiFispePBwUW7cUohK7Lw9UwQpQzlploswj9aIyzXObH5sA6uQKxQB/6xWahx
+  cluster_domain: bm-mcp-pike-k8s-contrail.local
+  cluster_name: bm-mcp-pike-k8s-contrail
+  # compute_bond_mode: active-backup
+  compute_padding_with_zeros: 'True'
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: GAZbu3hguubkeYMg15AQ1J6UuY60TAh8h0EVpNUrHWhjRS2SxRYBuxxLvVURv82m
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.167.8.0/24
+  control_vlan: '2410'
+  cookiecutter_template_branch: ''
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  deploy_network_gateway: 172.17.41.2
+  deploy_network_netmask: 255.255.255.192
+  deploy_network_subnet: 172.17.41.0/26
+  deployment_type: physical
+  dns_server01: 172.17.41.2
+  dns_server02: 172.17.41.2
+  email_address: dtyzhnenko@mirantis.com
+  etcd_ssl: 'True'
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.8.241
+  infra_kvm01_deploy_address: 172.17.41.4
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.8.242
+  infra_kvm02_deploy_address: 172.17.41.5
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.8.243
+  infra_kvm03_deploy_address: 172.17.41.6
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.167.8.240
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'True'
+  kubernetes_compute_count: 1
+  kubernetes_compute_rack01_single_subnet: 10.167.8
+  kubernetes_compute_rack01_tenant_subnet: 192.168.0
+  kubernetes_network_opencontrail_enabled: 'True'
+  local_repositories: 'False'
+  maas_deploy_address: 172.16.49.66
+  maas_deploy_range_end: 10.0.0.254
+  maas_deploy_range_start: 10.0.0.1
+  maas_deploy_vlan: '0'
+  maas_fabric_name: fabric-0
+  maas_hostname: cfg01
+  mcp_common_scripts_branch: ''
+  mcp_version: proposed
+  offline_deployment: 'False'
+  opencontrail_analytics_address: 10.167.8.30
+  opencontrail_analytics_hostname: nal
+  opencontrail_analytics_node01_address: 10.167.8.31
+  opencontrail_analytics_node01_hostname: nal01
+  opencontrail_analytics_node02_address: 10.167.8.32
+  opencontrail_analytics_node02_hostname: nal02
+  opencontrail_analytics_node03_address: 10.167.8.33
+  opencontrail_analytics_node03_hostname: nal03
+  opencontrail_compute_iface_mask: '24'
+  opencontrail_control_address: 10.167.8.20
+  opencontrail_control_hostname: ntw
+  opencontrail_control_node01_address: 10.167.8.21
+  opencontrail_control_node01_hostname: ntw01
+  opencontrail_control_node02_address: 10.167.8.22
+  opencontrail_control_node02_hostname: ntw02
+  opencontrail_control_node03_address: 10.167.8.23
+  opencontrail_control_node03_hostname: ntw03
+  opencontrail_enabled: 'True'
+  opencontrail_router01_address: 10.167.8.100
+  opencontrail_router01_hostname: rtr01
+  opencontrail_router02_address: 10.167.8.101
+  opencontrail_router02_hostname: rtr02
+  opencontrail_version: '4.0'
+  openstack_enabled: 'False'
+  openssh_groups: ''
+  openstack_version: pike
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  oss_notification_smtp_use_tls: 'False'
+  oss_pushkin_email_sender_password: password
+  oss_pushkin_smtp_host: 127.0.0.1
+  oss_pushkin_smtp_port: '587'
+  oss_webhook_app_id: '24'
+  oss_webhook_login_id: '13'
+  platform: kubernetes_enabled
+  public_host: ${_param:infra_config_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: frJMLJsfGkFXCk4qTTEttKla518Akvdp
+  salt_api_password_hash: $6$ixefPtcd$vasKuySO6L2JM0FKaB/udsQvH4upI2dWoJZaR9XTqeAx4UMvkHsNNSwsALVqTTLbXIngkFqYNXpbNm1o4iMGS.
+  salt_master_address: 172.17.41.3
+  salt_master_hostname: cfg01
+  salt_master_management_address: 172.17.41.3
+  shared_reclass_branch: ''
+  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  stacklight_enabled: 'True'
+  stacklight_log_address: 10.167.8.60
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 10.167.8.61
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 10.167.8.62
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 10.167.8.63
+  stacklight_log_node03_hostname: log03
+  stacklight_long_term_storage_type: influxdb
+  stacklight_monitor_address: 10.167.8.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 10.167.8.71
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 10.167.8.72
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 10.167.8.73
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_telemetry_address: 10.167.8.85
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 10.167.8.86
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 10.167.8.87
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 10.167.8.88
+  stacklight_telemetry_node03_hostname: mtr03
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 192.168.0.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 192.168.0.0/24
+  tenant_vlan: '2411'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'True'
+  vnf_onboarding_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml
new file mode 100644
index 0000000..63f07b5
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml
@@ -0,0 +1,209 @@
+nodes:
+    # Virtual Control Plane nodes
+
+    ctl01.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: kubernetes_control_node01
+      roles:
+      - kubernetes_control_contrail
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ctl02.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: kubernetes_control_node02
+      roles:
+      - kubernetes_control_contrail
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ctl03.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: kubernetes_control_node03
+      roles:
+      - kubernetes_control_contrail
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    prx01.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: kubernetes_proxy_node01
+      roles:
+      - kubernetes_proxy
+      # - infra_proxy
+      # - stacklight_proxy
+      - salt_master_host
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    prx02.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: kubernetes_proxy_node02
+      roles:
+      - kubernetes_proxy
+      # - infra_proxy
+      # - stacklight_proxy
+      - salt_master_host
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mon01.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: stacklight_server_node01
+      roles:
+      - stacklightv2_server_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mon02.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mon03.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    nal01.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: opencontrail_analytics_node01
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_ctl
+          single_address: 10.167.8.31
+
+    nal02.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: opencontrail_analytics_node02
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_ctl
+          single_address: 10.167.8.32
+
+    nal03.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: opencontrail_analytics_node03
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_ctl
+          single_address: 10.167.8.33
+
+    ntw01.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: opencontrail_control_node01
+      roles:
+      - opencontrail_control
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_ctl
+          single_address: 10.167.8.21
+
+    ntw02.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: opencontrail_control_node02
+      roles:
+      - opencontrail_control
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_ctl
+          single_address: 10.167.8.22
+
+    ntw03.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: opencontrail_control_node03
+      roles:
+      - opencontrail_control
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_ctl
+          single_address: 10.167.8.23
+
+    mtr01.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: stacklight_telemetry_node01
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mtr02.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: stacklight_telemetry_node02
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mtr03.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: stacklight_telemetry_node03
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    log01.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: stacklight_log_node01
+      roles:
+      - stacklight_log_leader_v2
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    log02.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: stacklight_log_node02
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    log03.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: stacklight_log_node03
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+#    bmk01.bm-mcp-pike-k8s-contrail.local:
+#      reclass_storage_name: openstack_benchmark_node01
+#      roles:
+#      - openstack_benchmark
+#      - linux_system_codename_xenial
+#      interfaces:
+#        ens3:
+#          role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
new file mode 100644
index 0000000..7e46564
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
@@ -0,0 +1,189 @@
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CMP001 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import ETH0_IP_ADDRESS_CFG01 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import ETH0_IP_ADDRESS_CMP001 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# For other salt model repository parameters, see shared-salt.yaml
+
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','bm-mcp-pike-k8s-contrail') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-k8s-contrail.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-upgrade-physical-inventory.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2410') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2411') %}
+
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd" "auditd"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml') }}
+
+
+# - description: "Registration cmp001 node"
+#   cmd: |
+#     salt-call event.send "reclass/minion/classify" \
+#       "{\"node_master_ip\": \"{{ ETH0_IP_ADDRESS_CFG01 }}\", \
+#       \"node_deploy_ip\": \"{{ ETH0_IP_ADDRESS_CMP001 }}\", \
+#       \"node_control_ip\": \"10.167.8.101\", \
+#       \"node_tenant_ipcontrol_ip\": \"10.167.8.101\", \
+#       \"node_os\": \"xenial\", \
+#       \"node_domain\": \"{{ DOMAIN_NAME }}\", \
+#       \"node_cluster\": \"{{ LAB_CONFIG_NAME }}\",
+#       \"node_hostname\": \"$(hostname -s)\"}"
+#   node_name: {{ HOSTNAME_CMP001 }}
+#   retry: {count: 1, delay: 10}
+#   skip_fail: false
+#
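+# Drop the dynamic rack01 compute generator so that compute nodes come only from the static inventory (cmp001).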
+- description: "Workaround for rack01 compute generator"
+  cmd: |
+    set -e;
+    # Remove rack01 key
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.kubernetes_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+# - description: "WR for changing image to proposed"
+#   cmd: |
+#     set -e;
+#     # Add message_queu host for opencontrail
+#     . /root/venv-reclass-tools/bin/activate;
+#     reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+#     reclass-tools add-key parameters._param.salt_control_trusty_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-14-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 10}
+#   skip_fail: false
+
+- description: "Workaround for xenial images"
+  cmd: |
+    set -e;
+    # Point the control plane VM definitions at the xenial image
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools add-key parameters.salt.control.cluster.internal.node.nal01.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    reclass-tools add-key parameters.salt.control.cluster.internal.node.nal02.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    reclass-tools add-key parameters.salt.control.cluster.internal.node.nal03.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    reclass-tools add-key parameters.salt.control.cluster.internal.node.ntw01.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    reclass-tools add-key parameters.salt.control.cluster.internal.node.ntw02.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    reclass-tools add-key parameters.salt.control.cluster.internal.node.ntw03.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    reclass-tools add-key parameters.salt.control.cluster.internal.node.prx01.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    reclass-tools add-key parameters.salt.control.cluster.internal.node.prx02.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+
+- description: "Workaround for PROD-14060"
+  cmd: |
+    set -e;
+    # Add tenant and single addresses for computes
+    salt-call reclass.cluster_meta_set deploy_address 172.17.41.7 /srv/salt/reclass/nodes/_generated/cmp001.bm-mcp-pike-k8s-contrail.local.yml
+    salt-call reclass.cluster_meta_set tenant_address 192.168.0.101 /srv/salt/reclass/nodes/_generated/cmp001.bm-mcp-pike-k8s-contrail.local.yml
+    salt-call reclass.cluster_meta_set single_address 10.167.8.101 /srv/salt/reclass/nodes/_generated/cmp001.bm-mcp-pike-k8s-contrail.local.yml
+
+    # salt-call reclass.cluster_meta_set deploy_address 172.16.49.74 /srv/salt/reclass/nodes/_generated/cmp002.bm-mcp-pike-k8s-contrail.local.yml
+    # salt-call reclass.cluster_meta_set tenant_address 192.168.0.102 /srv/salt/reclass/nodes/_generated/cmp002.bm-mcp-pike-k8s-contrail.local.yml
+    # salt-call reclass.cluster_meta_set single_address 10.167.8.102 /srv/salt/reclass/nodes/_generated/cmp002.bm-mcp-pike-k8s-contrail.local.yml
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Temporary WR for correct bridge name according to environment templates
+  cmd: |
+    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+########################################
+# Spin up Control Plane VMs on KVM nodes
+########################################
+
+- description: Execute 'libvirt' states to create necessary libvirt networks
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Create VMs for control plane
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
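+# The check below succeeds only once every VM name reported by 'virsh list' on the kvm nodes appears among the accepted salt keys.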
+- description: '*Workaround* for waiting the control-plane VMs in the salt-key (instead of sleep)'
+  cmd: |
+    salt-key -l acc| sort > /tmp/current_keys.txt &&
+    salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 20, delay: 30}
+  skip_fail: false
+
+#########################################
+# Configure all running salt minion nodes
+#########################################
+
+- description: Hack resolv.conf on VCP nodes for internal services access
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.17.41.2' > /etc/resolv.conf;"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Refresh pillars on all minions
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all salt resources
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Show reclass-salt --top for generated nodes
+  cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
new file mode 100644
index 0000000..4045fe8
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
@@ -0,0 +1,257 @@
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install docker swarm
+- description: Configure docker service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install docker swarm on master node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Update mine
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Refresh modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Rerun swarm on slaves for proper token population
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure slave nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: List registered Docker swarm nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install keepalived on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'mon*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the VIP on mon nodes
+  cmd: |
+    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# Install slv2 infra
+# Launch containers
+- description: Install Mongo if target matches
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure Alerta if it exists
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Launch prometheus containers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Check docker ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Install telegraf
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install elasticsearch server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install kibana server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install elasticsearch client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Install kibana client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
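+# The conditional steps below probe for optional services: 'test.ping ... && echo true' prints "true" only when minions match the pillar target.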
+- description: Check influxdb
+  cmd: |
+    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+    if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+# Install Prometheus LTS (optional, if set in the model)
+- description: Prometheus LTS (optional, if set in the model)
+  cmd: |
+    PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Prometheus relay service presence: ${PROMETHEUS_SERVICE}";
+    if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+# Install the log collection service
+- description: Configure fluentd
+  cmd: |
+    FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Fluentd service presence: ${FLUENTD_SERVICE}";
+    if [[ "$FLUENTD_SERVICE" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
+    else
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+# Install heka ceilometer collector
+- description: Install heka ceilometer collector if it exists
+  cmd: |
+    CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Ceilometer service presence: ${CEILO}";
+    if [[ "$CEILO" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+# Collect grains needed to configure the services
+
+- description: Get grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Sync modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Update mine
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 15}
+  skip_fail: false
+
+# Configure the services running in Docker Swarm
+- description: Configure prometheus in docker swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure Remote Collector in Docker Swarm for Openstack deployments
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install sphinx
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+
+#- description: Install prometheus alertmanager
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
+
+#- description: run docker state
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
+#
+#- description: docker ps
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Run salt minion to create cert files
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..646af7a
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
@@ -0,0 +1,103 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - sed -i -e '/^PasswordAuthentication/s/^.*$/PasswordAuthentication yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifdown ens3
+   - sudo ip r d default || true  # remove existing default route to get it from dhcp
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   #   - echo "Preparing base OS"
+
+   - echo "nameserver 172.17.41.2" > /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+   # Configure Ubuntu mirrors
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   #   - apt-get clean
+   #   - apt-get update
+
+   # Install common packages
+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   # Install salt-minion and stop it until it is configured
+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+   ########################################################
+   # Node is ready, allow SSH access
+   #   - echo "Allow SSH access ..."
+   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml
new file mode 100644
index 0000000..a8981c7
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml
@@ -0,0 +1,104 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   # - sed -i -e '/^PasswordAuthentication/s/^.*$/PasswordAuthentication yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup {interface_name}
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   #   - echo "Preparing base OS"
+   - echo "nameserver 172.17.41.2" > /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+   # Configure Ubuntu mirrors
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   #   - apt-get clean
+   #   - eatmydata apt-get update && apt-get -y upgrade
+
+   # Install common packages
+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   # Install salt-minion and stop it until it is configured
+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+   # Install latest kernel
+   #   - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
+
+   # Register compute node in salt master
+   # - salt-call event.send "reclass/minion/classify" "{{ "{{" }}\"node_master_ip\": \"{{ ETH0_IP_ADDRESS_CFG01 }}\", \"node_os\": \"xenial\", \"node_domain\": \"{{ DOMAIN_NAME }}\", \"node_cluster\": \"{{ LAB_CONFIG_NAME }}\"{{ "}}" }}"
+
+
+   ########################################################
+   # Node is ready, allow SSH access
+   #- echo "Allow SSH access ..."
+   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   #   - reboot
+   ########################################################
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          # The loopback network interface
+          auto lo
+          iface lo inet loopback
+          auto {interface_name}
+          iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml
new file mode 100644
index 0000000..bb7056a
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml
@@ -0,0 +1,95 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup {interface_name}
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   #   - echo "Preparing base OS"
+   - echo "nameserver 172.17.41.2" > /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+   # Configure Ubuntu mirrors
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   #   - apt-get clean
+   #   - eatmydata apt-get update && apt-get -y upgrade
+
+   # Install common packages
+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   # Install salt-minion and stop it until it is configured
+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+   ########################################################
+   # Node is ready, allow SSH access
+   #   - echo "Allow SSH access ..."
+   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          # The loopback network interface
+          auto lo
+          iface lo inet loopback
+          auto {interface_name}
+          iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
new file mode 100644
index 0000000..ef8c4f1
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
@@ -0,0 +1,538 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-mcp-pike-k8s-contrail') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
+{#
+{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
+#}
+{% set ETH0_IP_ADDRESS_CFG01 = os_env('ETH0_IP_ADDRESS_CFG01', '172.17.41.3') %}
+{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.17.41.4') %}
+{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.17.41.5') %}
+{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.17.41.6') %}
+{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.17.41.7') %}
+{#
+# {% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
+# {% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.167.140') %}
+#}
+{#
+# {% set ETH0_IP_ADDRESS_PRX01 = os_env('ETH0_IP_ADDRESS_PRX01', '172.17.41.8') %}
+# {% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.49.5') %}
+# {% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
+#}
+{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
+{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml' as CLOUDINIT_USER_DATA_HWE_CMP with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
+ - &cloudinit_user_data_hwe_cmp {{ CLOUDINIT_USER_DATA_HWE_CMP }}
+
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'cookied-bm-mcp-pike-k8s-contrail_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.17.41.0/26:26') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +61
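+            # Explanatory note: offsets are relative to the pool network. With
+            # the default 172.17.41.0/26 (usable hosts .1-.62), gateway '+1'
+            # resolves to 172.17.41.1 and l2_network_device '+61' to
+            # 172.17.41.61; negative offsets (see external-pool01 below) count
+            # back from the end of the range.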
+            default_{{ HOSTNAME_CFG01 }}: {{ ETH0_IP_ADDRESS_CFG01 }}
+            default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+            default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+            default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+            default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+            #default_{{ HOSTNAME_PRX01 }}: {{ ETH0_IP_ADDRESS_PRX01 }}
+
+            virtual_{{ HOSTNAME_CFG01 }}: {{ ETH0_IP_ADDRESS_CFG01 }}
+            virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+            virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+            virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+            virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+            #virtual_{{ HOSTNAME_PRX01 }}: {{ ETH0_IP_ADDRESS_PRX01 }}
+          #ip_ranges:
+          #    dhcp: [+2, -4]
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.13.0/24:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.14.0/24:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.41.128/26:26') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: -2
+
+    groups:
+
+      - name: virtual
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+        network_pools:
+          admin: admin-pool01
+
+        l2_network_devices:
+          # Ironic management interface
+          admin:
+            address_pool: admin-pool01
+            dhcp: false
+            parent_iface:
+              phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
+                #- label: ens4
+                #  l2_network_device: private
+                #  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                #ens4:
+                #  networks:
+                #    - private
+
+
+      - name: default
+        driver:
+          name: devops_driver_ironic
+          params:
+            os_auth_token: fake-token
+            ironic_url: !os_env IRONIC_URL  # URL that will be used by fuel-devops
+                                            # to access Ironic API
+            # Agent URL that is accessible from the deploying node when nodes
+            # are bootstrapped with PXE. Usually the PXE/provision network address is used.
+            agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
+            agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
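+            # Illustrative environment for a run (hypothetical values; the
+            # template ships no defaults for these variables):
+            #   export IRONIC_URL='http://10.0.0.1:6385/'
+            #   export IRONIC_AGENT_KERNEL_URL='http://10.0.0.1:8080/tinyipa.vmlinuz'
+            #   export IRONIC_AGENT_RAMDISK_URL='http://10.0.0.1:8080/tinyipa.gz'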
+
+        network_pools:
+          admin: admin-pool01
+
+        nodes:
+
+        #  - name: {{ HOSTNAME_CFG01 }}
+        #    role: salt_master
+        #    params:
+        #      ipmi_user: !os_env IPMI_USER
+        #      ipmi_password: !os_env IPMI_PASSWORD
+        #      ipmi_previlegies: OPERATOR
+        #      ipmi_host: !os_env IPMI_HOST_CFG01  # hostname or IP address
+        #      ipmi_lan_interface: lanplus
+        #      ipmi_port: 623
+
+        #      root_volume_name: system     # see 'volumes' below
+        #      cloud_init_volume_name: iso  # see 'volumes' below
+        #      cloud_init_iface_up: enp3s0f1  # see 'interfaces' below.
+        #      volumes:
+        #        - name: system
+        #          capacity: !os_env NODE_VOLUME_SIZE, 200
+
+        #          # As with the agent URL, this is a URL to the image that should be
+        #          # used to deploy the node. It should also be accessible from the deploying
+        #          # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+        #          source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+        #          source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+        #        - name: iso  # Volume with name 'iso' will be used
+        #                     # to store the image with cloud-init metadata.
+
+        #          cloudinit_meta_data: *cloudinit_meta_data
+        #          cloudinit_user_data: *cloudinit_user_data_cfg01
+
+        #      interfaces:
+        #        - label: enp3s0f0  # Infra interface
+        #          mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
+        #        - label: enp3s0f1
+        #          l2_network_device: admin
+        #          mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+
+        #      network_config:
+        #        enp3s0f0:
+        #          networks:
+        #           - infra
+        #        enp3s0f1:
+        #          networks:
+        #           - admin
+          # - name: {{ HOSTNAME_PRX01 }}
+          #   role: salt_minion
+          #   params:
+          #     ipmi_user: !os_env IPMI_USER
+          #     ipmi_password: !os_env IPMI_PASSWORD
+          #     ipmi_previlegies: OPERATOR
+          #     ipmi_host: !os_env IPMI_HOST_PRX01  # hostname or IP address
+          #     ipmi_lan_interface: lanplus
+          #     ipmi_port: 623
+
+          #     root_volume_name: system     # see 'volumes' below
+          #     cloud_init_volume_name: iso  # see 'volumes' below
+          #     cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+          #     volumes:
+          #       - name: system
+          #         capacity: !os_env NODE_VOLUME_SIZE, 200
+
+          #         # As with the agent URL, this is a URL to the image that should be
+          #         # used to deploy the node. It should also be accessible from the deploying
+          #         # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+          #         source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+          #         source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+          #       - name: iso  # Volume with name 'iso' will be used
+          #                    # to store the image with cloud-init metadata.
+
+          #         cloudinit_meta_data: *cloudinit_meta_data
+          #         cloudinit_user_data: *cloudinit_user_data
+
+          #     interfaces:
+          #       - label: enp9s0f0
+          #         l2_network_device: admin
+          #         mac_address: !os_env ETH0_MAC_ADDRESS_PRX01
+          #       - label: enp9s0f1
+          #         mac_address: !os_env ETH1_MAC_ADDRESS_PRX01
+
+          #     network_config:
+          #       enp9s0f0:
+          #         networks:
+          #          - admin
+          #       bond0:
+          #         networks:
+          #          - control
+          #         aggregation: active-backup
+          #         parents:
+          #          - enp9s0f1
+
+          - name: {{ HOSTNAME_KVM01 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM01  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URL, this is a URL to the image that should be
+                  # used to deploy the node. It should also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
+
+              network_config:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
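+              # Sketch of the interfaces config this is expected to render to
+              # (an assumption about the linux.network formula, not verified):
+              #   auto bond0
+              #   iface bond0 inet manual
+              #       bond-mode active-backup
+              #       bond-slaves enp9s0f1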
+
+          - name: {{ HOSTNAME_KVM02 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM02  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URL, this is a URL to the image that should be
+                  # used to deploy the node. It should also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
+
+              network_config:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+          - name: {{ HOSTNAME_KVM03 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM03  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              # cloud_init_iface_up: eno1  # see 'interfaces' below.
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URL, this is a URL to the image that should be
+                  # used to deploy the node. It should also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                # - label: eno1
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
+                # - label: eno2
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
+
+              network_config:
+                # eno1:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+
+          - name: {{ HOSTNAME_CMP001 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_CMP001  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              # cloud_init_iface_up: enp3s0f0  # see 'interfaces' below.
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URL, this is a URL to the image that should be
+                  # used to deploy the node. It should also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_hwe_cmp
+
+              interfaces:
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
+                # - label: enp5s0f0
+                #   mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
+                #   features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+                # - label: enp5s0f1
+                #   mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
+                #   features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+                # - label: enp5s0f2
+                #   mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
+                #   features: ['dpdk', 'dpdk_pci: 0000:05:00.2']
+
+              network_config:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+
+
+          # - name: {{ HOSTNAME_CMP002 }}
+          #   role: salt_minion
+          #   params:
+          #     ipmi_user: !os_env IPMI_USER
+          #     ipmi_password: !os_env IPMI_PASSWORD
+          #     ipmi_previlegies: OPERATOR
+          #     ipmi_host: !os_env IPMI_HOST_CMP002  # hostname or IP address
+          #     ipmi_lan_interface: lanplus
+          #     ipmi_port: 623
+
+          #     root_volume_name: system     # see 'volumes' below
+          #     cloud_init_volume_name: iso  # see 'volumes' below
+          #     # cloud_init_iface_up: eno1  # see 'interfaces' below.
+          #     cloud_init_iface_up: enp2s0f1  # see 'interfaces' below.
+          #     volumes:
+          #       - name: system
+          #         capacity: !os_env NODE_VOLUME_SIZE, 200
+
+          #         # As with the agent URL, this is a URL to the image that should be
+          #         # used to deploy the node. It should also be accessible from the deploying
+          #         # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+          #         source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+          #         source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+          #       - name: iso  # Volume with name 'iso' will be used
+          #                    # to store the image with cloud-init metadata.
+
+          #         cloudinit_meta_data: *cloudinit_meta_data
+          #         cloudinit_user_data: *cloudinit_user_data_hwe
+
+          #     interfaces:
+          #       # - label: eno1
+          #       - label: enp2s0f0
+          #         mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
+          #       # - label: eth0
+          #       - label: enp2s0f1
+          #         l2_network_device: admin
+          #         mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
+          #       # - label: eth3
+          #       - label: enp5s0f0
+          #         mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
+          #         features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+          #       # - label: eth2
+          #       - label: enp5s0f1
+          #         mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
+          #         features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+          #       # - label: eth4
+          #       #   mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
+          #       #   features: ['dpdk', 'dpdk_pci: 0000:0b:00.0']
+
+          #     network_config:
+          #       enp2s0f1:
+          #         networks:
+          #          - admin
+          #       bond0:
+          #         networks:
+          #          - control
+          #         aggregation: active-backup
+          #         parents:
+          #          - enp5s0f0
+          #          - enp5s0f1
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 9a2bcd9..36d68bb 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -958,7 +958,7 @@
 - description: Update minion information
   cmd: |
     salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
-    salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update && sleep 15
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update && sleep 60
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
@@ -1356,4 +1356,4 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-{%- endmacro %}
\ No newline at end of file
+{%- endmacro %}