Remove old cookied-* templates

cookied-* templates are related to the 2018.11.0 MCP release.

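Sanity check before merging (a minimal sketch, assuming the repository
root as the working directory): confirm that nothing left in the tree
still references the removed template directory.

    grep -rn "cookied-bm-k8s-contrail" tcp_tests/ \
        && echo "dangling references found" \
        || echo "no remaining references"
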
Change-Id: I472397c7394fcfbb317842c2652629c00b5683e3
Related-Prod: PROD-34559
diff --git a/tcp_tests/templates/cookied-bm-k8s-contrail/core.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/core.yaml
deleted file mode 100644
index 2d79d55..0000000
--- a/tcp_tests/templates/cookied-bm-k8s-contrail/core.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
-{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
-{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
-{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Sync all
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Create and distribute SSL certificates for services using salt state
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls salt -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: true
-
-- description: Install docker
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@docker:host' state.sls docker.host
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check docker
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@docker:host' cmd.run 'docker ps'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install keepalived on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
diff --git a/tcp_tests/templates/cookied-bm-k8s-contrail/k8s.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/k8s.yaml
deleted file mode 100644
index c505c58..0000000
--- a/tcp_tests/templates/cookied-bm-k8s-contrail/k8s.yaml
+++ /dev/null
@@ -1,278 +0,0 @@
-{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_CTL01 with context %}
-
-{%- macro MACRO_CHECK_SYSTEMCTL() %}
-{#######################################}
-- description: Check systemctl on compute
-  cmd: |
-    set -ex;
-    salt 'cmp*' cmd.run "systemctl --version";
-    salt 'cmp*' cmd.run "journalctl -u dbus";
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: true
-{%- endmacro %}
-
-- description: Install keepalived on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install etcd
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@etcd:server' state.sls etcd.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 30}
-  skip_fail: false
-
-- description: Install certs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@etcd:server' state.sls salt.minion -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-# Install opencontrail database services
-- description: Install opencontrail database services for 01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Install opencontrail database services
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:database' state.sls opencontrail.database
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-# Install opencontrail control services
-- description: Install opencontrail services for 01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Install opencontrail services
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Create and distribute certs on Kubernetes master nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:master' state.sls salt.minion.cert
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Install docker host
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@docker:host' state.sls docker.host
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-  #- description: Configure OpenContrail as an add-on for Kubernetes
-  #  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-  #    -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
-  #  node_name: {{ HOSTNAME_CFG01 }}
-  #  retry: {count: 1, delay: 5}
-  #  skip_fail: false
-
-- description: Install Kubernetes components
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:master' state.sls kubernetes.pool
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: "Run k8s master at *01* to simplify namespaces creation"
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@kubernetes:master and *01*' state.sls kubernetes.master exclude=kubernetes.master.setup,kubernetes.master.kube-addons
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 15}
-  skip_fail: false
-
-- description: Run k8s without master.setup
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup,kubernetes.master.kube-addons
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 15}
-  skip_fail: false
-
-- description: Check the etcd health
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@etcd:server' cmd.run '. /var/lib/etcd/configenv && etcdctl cluster-health'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-{{ MACRO_CHECK_SYSTEMCTL() }}
-
-- description: Run Kubernetes master setup
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@kubernetes:master' state.sls kubernetes.master.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart Kubelet
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:master' service.restart 'kubelet'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Wait for contrail containers to come up; the opencontrail.client state should be run only after that
-  cmd: |
-    sleep 30;
-    total_pods=`kubectl get pods --all-namespaces | awk '/opencontrail/ {print $3}' | cut -d "/" -f2`
-    for i in `seq 1 10`; do
-      ready_pods=`kubectl get pods --all-namespaces | awk '/opencontrail/ {print $3}' | cut -d "/" -f1`
-      if [ "$ready_pods" == "$total_pods" ];then
-        echo "containers are ready. Going to the next step"
-        break
-      elif [ "$i" -ne "10" ]; then
-        echo "Opencontrail containers is not ready. $ready_pods from $total_pods is ready."
-        sleep 60
-        continue
-      else
-        echo "Failed to up contrail containers in 10 minutes"
-        exit 1
-      fi
-    done
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Check all pods
-  cmd: |
-     salt 'ctl*' cmd.run "kubectl -o wide get pods --all-namespaces";
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-# Install contrail computes
-- description: Set up the OpenContrail resources
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 60}
-  skip_fail: false
-
-- description: Apply opencontrail (excluding opencontrail.client) on contrail computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Reboot contrail computes
-  cmd: |
-    salt --async -C 'I@opencontrail:compute' system.reboot;
-    sleep 450;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: true
-
-- description: Apply opencontrail.client on contrail computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:compute' state.sls opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Apply opencontrail on contrail computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:compute' state.sls opencontrail
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Refresh pillars on cmp*
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'cmp*' saltutil.refresh_pillar
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Sync all on contrail computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:compute' saltutil.sync_all
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Create and distribute certs on Kubernetes master nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:master' state.sls salt.minion.cert
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Install Kubernetes components
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:pool and not I@kubernetes:master' state.sls kubernetes.pool
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Restart Kubelet
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:pool and not I@kubernetes:master' service.restart 'kubelet'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Configure OpenContrail as an add-on for Kubernetes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Renew hosts file on the whole cluster
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '*' state.sls linux.network.host;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Final check of all pods
-  cmd: |
-     sleep 60;
-     salt 'ctl*' cmd.run "kubectl -o wide get pods --all-namespaces";
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check contrail status on all pods
-  cmd: |
-     pods=`kubectl get pods --all-namespaces | awk '/opencontrail/ {print $2}'`
-     for i in $pods; do
-       kubectl exec $i -c opencontrail-controller -n kube-system contrail-status;
-       kubectl exec $i -c opencontrail-analytics -n kube-system contrail-status;
-       kubectl exec $i -c opencontrail-analyticsdb -n kube-system contrail-status;
-     done
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-k8s-contrail/lab04-upgrade-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/lab04-upgrade-physical-inventory.yaml
deleted file mode 100644
index ad4e04a..0000000
--- a/tcp_tests/templates/cookied-bm-k8s-contrail/lab04-upgrade-physical-inventory.yaml
+++ /dev/null
@@ -1,119 +0,0 @@
-nodes:
-    cfg01.bm-k8s-contrail.local:
-      reclass_storage_name: infra_config_node01
-      roles:
-      - infra_config
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-    # Physical nodes
-
-    kvm01.bm-k8s-contrail.local:
-      reclass_storage_name: infra_kvm_node01
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        enp9s0f0:
-          role: single_mgm
-        enp9s0f1:
-          role: single_vlan_ctl
-
-    kvm02.bm-k8s-contrail.local:
-      reclass_storage_name: infra_kvm_node02
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        enp9s0f0:
-          role: single_mgm
-        enp9s0f1:
-          role: single_vlan_ctl
-
-    kvm03.bm-k8s-contrail.local:
-      reclass_storage_name: infra_kvm_node03
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        enp9s0f0:
-          role: single_mgm
-        enp9s0f1:
-          role: single_vlan_ctl
-
-    ctl01.bm-k8s-contrail.local:
-      reclass_storage_name: kubernetes_control_node01
-      roles:
-      - kubernetes_control_contrail
-      - linux_system_codename_xenial
-      interfaces:
-        enp2s0f0:
-          role: single_mgm
-          deploy_address: 172.17.41.9
-        enp2s0f1:
-          role: single_vlan_ctl
-          single_address: 10.167.8.239
-
-    ctl02.bm-k8s-contrail.local:
-      reclass_storage_name: kubernetes_control_node02
-      roles:
-      - kubernetes_control_contrail
-      - linux_system_codename_xenial
-      interfaces:
-        enp2s0f0:
-          role: single_mgm
-          deploy_address: 172.17.41.10
-        enp2s0f1:
-          role: single_vlan_ctl
-          single_address: 10.167.8.238
-
-    ctl03.bm-k8s-contrail.local:
-      reclass_storage_name: kubernetes_control_node03
-      roles:
-      - kubernetes_control_contrail
-      - linux_system_codename_xenial
-      interfaces:
-        enp2s0f0:
-          role: single_mgm
-          deploy_address: 172.17.41.11
-        enp2s0f1:
-          role: single_vlan_ctl
-          single_address: 10.167.8.237
-
-    cmp<<count>>:
-      reclass_storage_name: kubernetes_compute_rack01
-      roles:
-      - kubernetes_compute_contrail
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        enp9s0f0:
-          role: single_dhcp
-        ens11f1:
-          role: k8s_oc40_only_vhost_on_control_vlan
-          #    cmp001.bm-k8s-contrail.local:
-          #      reclass_storage_name: kubernetes_compute_node001
-          #      roles:
-          #      - linux_system_codename_xenial
-          #      - kubernetes_compute_contrail
-          #      - salt_master_host
-          #      interfaces:
-          #        enp9s0f0:
-          #          role: single_dhcp
-          #        ens11f1:
-          #          role: k8s_oc40_only_vhost_on_control_vlan
-          #          single_address: 10.167.8.103
-          #
-          #    cmp002.bm-k8s-contrail.local:
-          #      reclass_storage_name: kubernetes_compute_node002
-          #      roles:
-          #      - linux_system_codename_xenial
-          #      - kubernetes_compute_contrail
-          #      - salt_master_host
-          #      interfaces:
-          #        enp9s0f0:
-          #          role: single_dhcp
-          #        ens11f1:
-          #          role: k8s_oc40_only_vhost_on_control_vlan
-          #          single_address: 10.167.8.104
diff --git a/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
deleted file mode 100644
index c8fc345..0000000
--- a/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
+++ /dev/null
@@ -1,220 +0,0 @@
-#https://docs.mirantis.com/mcp/master/mcp-ref-arch/opencontrail-plan/contrail-overview/contrail-4.html#
-default_context:
-  backup_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEowIBAAKCAQEAskZyhTySYWvGrp+dlv+q2La8oiM8Sv1JfQog8+UW28hGUbCq
-    PnWa7bzVqENEY+g+qbQYh2Pvb2xURyY9+02TWLmCYzq7+orO1eG2SDt384YzDc6B
-    nQohUbGwaSH2YsT/QA8KV1fvUW9P0gVEHmJWR8Jo3vdRyw+tEUszYkiTYkcVc9zt
-    O5eYT9a6gSjLhl+tmJXu38jdWTcE8zJP+OGpJRDJpV2EAYwv+LXde9REv4KCGMqN
-    lvli9IA+zarfMsAzSTPpL5ooslB20akKM1h5li3LG47OpKMG9sMVFGEEo7+lqTGa
-    zUJEUPbJU95PqLfxoIOjYF/MhWKU5VALLraVwwIDAQABAoIBAHUAj0CqlFej6G3L
-    DB6CBZrds8el4HhG6+hIsX/gprPwKVaL3+/GN7w35vHb1BLN5fJy5HZXPFmge1+G
-    1b8NFttwRQbjEWRJnZ352Sxm+z60oOU61w4+C8gWGnWleJMyP2PHez3/1G1Z5MUt
-    95sJZx8JlNJg9ymSTD/BXyPuBezFKf8jUSgPbhBv8B2yy82YGzqc9u7sK6BN90P1
-    3ZcynQ4cfieZLoim56dF9YEixr8plGmGpOspPZFlVCGIc1y2BC4ZUyDatcCa7/gQ
-    3aDdt9lkEfoCHezAFOmaZDCOZ70spkwCqXYk42BXpDjKF6djCXyx3WKVF+IhEOYT
-    /S1I8KECgYEA1tzUZgW43/Z7Sm+QnoK3R9hG2oZZOoiTDdHyC+f5dk6maNbJbivM
-    FAPIpzHtCyvBEiSgGmlqpUrwR2NbYnOHjOX72Yq7/e0Vl1XWmplKCsTDNFGXx5Fs
-    9AQbWjOF+drgfZ5p3dNyE9689nJZg5EhTxL8dfwnZat/l+/OKFO2tM0CgYEA1GhW
-    4FMsXe3/g7M0dj5FnbS8xjz93pei5YZanID9mY/RUcKbegdtfvtwsfkZe5trbI39
-    jv96QyJeAnf48UDFwCV6SSZF/8Ez0pYHZanwkwKsli5uZdvSz7rUyVwE6tyO24WA
-    Trgpmbb8uxhJHBNuD+bC/iGd1H0IUuJ65ChD9M8CgYEAxfp2z4boQZ2ZHw5LoHLr
-    tIyJROPUJgYgEfToZaYbC7MOzL1Un2pFwg92fPCY7dkkLraGu690r9esLOtVEhNH
-    zEFB3cJi1Gf3pBlZA9zJB8Ej6Pphs2bBkNqT5XpiMcZHYhhsjhQ+Iibz0NWuu3cn
-    zPe+nmx4VMtAZ1x0hl4UlOUCgYBh8NaWS2O60AIwrRI0r5QUGwGsevUojU0Mfr4L
-    SiMlir4e8BgW1ZP0qGYXel/4sfp/rJ1NMZojmJY2lzFPtOk6J03SqvY97s1BffJd
-    O1X1w5bnDPPUvd7f8CsryeVuklLBADbglWSBP3IbpyAW9RKb/HDPE5seHqUW6t11
-    lOd42wKBgBW0tTV6+aqao6o4ZBU0SVoNg9/PwgOubAzeSDW2+ArXn1sMmroSfXVw
-    fbUTJI5TF/1pd7A5AKoR1FfTqlssoYlMGEPI6HJ4n9/4SqLpwd99HFW0ISt+EUMh
-    Tqt9mDfKzwHxG2QTuOwyrslO8iTwRoN5OYgm4dsL471Obr4DILTz
-    -----END RSA PRIVATE KEY-----
-  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyRnKFPJJha8aun52W/6rYtryiIzxK/Ul9CiDz5RbbyEZRsKo+dZrtvNWoQ0Rj6D6ptBiHY+9vbFRHJj37TZNYuYJjOrv6is7V4bZIO3fzhjMNzoGdCiFRsbBpIfZixP9ADwpXV+9Rb0/SBUQeYlZHwmje91HLD60RSzNiSJNiRxVz3O07l5hP1rqBKMuGX62Yle7fyN1ZNwTzMk/44aklEMmlXYQBjC/4td171ES/goIYyo2W+WL0gD7Nqt8ywDNJM+kvmiiyUHbRqQozWHmWLcsbjs6kowb2wxUUYQSjv6WpMZrNQkRQ9slT3k+ot/Ggg6NgX8yFYpTlUAsutpXD
-  bmk_enabled: 'False'
-  ceph_enabled: 'False'
-  auditd_enabled: 'False'
-  cicd_control_node01_address: 10.167.8.91
-  cicd_control_node01_hostname: cid01
-  cicd_control_node02_address: 10.167.8.92
-  cicd_control_node02_hostname: cid02
-  cicd_control_node03_address: 10.167.8.93
-  cicd_control_node03_hostname: cid03
-  cicd_control_vip_address: 10.167.8.90
-  cicd_control_vip_hostname: cid
-  cicd_enabled: 'True'
-  cicd_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEpgIBAAKCAQEAxLQy4F7sNBloj0fFvklCq9+IX/BV5OBs6OtSBf6A+ztTs68i
-    ib5W6Tz/knh7wt2URB6uKJJBRBK+Oqj91ZNJxISewP2f5gX10WpjOAti+Fghkemt
-    kvyA8aUxX1wuAz7Y2v1uv1IkfWa5ubL8oJXNjeT9yeCNJWBxvd46XT9UiCs5CnDL
-    lBjRg+AP2+u5OabUFtH7GSzVqcMzhi0qLilP+cRhKmar2tQXFEI5wnwADh0REAF/
-    OxUZPaPEPD9TW7fGxjfrMtyUKqTEbi+EPsIvldkR0IhYrKXjwcFFu3FKISuy8PVM
-    EKUM5aZaLMI/WiMs1zmx+bAOrkCsUAf+sVmocQIDAQABAoIBAQCRnSAojrxmmQSZ
-    RLVy9wK+/zwSYXUEeMrG5iUTQOM0mCePVa/IrjnicYB0anKbv7IZd2jPqe1cuk9O
-    V3mJGH68Vry6+0XaX0EpJIuMmolKdNttC8Ktk/TUbciN4kxBpM2d14ybXvCaUGhe
-    usxfCGZhi0oAnxV9vNaWiqNEEjS+k4u9XTnj3+GxstEwch+l7xJwz83WEsx7G1Zz
-    3Yxg7mh2uRPVCOZGVdClciym+9WHHrcdYw/OJCsSFsT7+qgzptsvXBVxa6EuGaVY
-    Pba+UfOnYIKlBtka4i3zXGaCQF6t2FHw5WyUEmYm3iBYmrGBbEf+3665Kh4NQs0a
-    PV4eHlLdAoGBAO8nDgkTA4gi1gyFy2YBUFP2BignkKCZGHkD8qvBnOt1Rxm6USlZ
-    7GzAtU3nSd8ODzgOBI7+zd82yRqv2hEwP7xARhr0Nx1XvyaQtRlQ6tQnBgvqLDCG
-    n0qvWoBM+Yl6sTRGYavAMCaR7PuULUcZFNWk7m0fv4vqddGijgRsje37AoGBANKP
-    nN72BujsQIhdzAYS+u5/Hxu56Tvgupe6kWkhQuV8MQcM+79I6cgJxxH6zQDP/hGt
-    3vXapgWUgi025LuEUWfkxAtTUfT4cRP2x529CH/XLQMYVqWxkoben9r+eFav+Kgw
-    C0dR3vSOlEMzYoIF+p/km0mIV1ZKZvrWymtXSdODAoGBAL4feUwDfqpKr4pzD1l/
-    r+Gf1BM2KQdTzp3eYpzjJiIWMTkl4wIRyCBJL5nIRvT6E2VH153qubY7srLxnFZP
-    2kuJeXJSNkKwkHlTT3XZ22Zfw7HTL+BAFgDk2PjouPTvwlLBpUJKXr07A4CZs0kz
-    ilmybg340GmmMpY/OdIQjuDjAoGBAMcd5hP2hqxHlfMe+CwaUM+uySU4FvZ32xxW
-    4uGPAarwWZC4V20Zr3JqxKUdDjYhersPOFQ4c129hajqSz2EsFLWRAoNvegx9QUT
-    Dsv9EgeK3Vca8f14wf7mkjbPA8++UyABZvkH1BZiqpQuCI66xrnjvnG4DBde/qlg
-    60S84+SvAoGBAKH1feNtJaNhDxF0OqRuVmSFyL3pkMDoYr/mgpT4T1ToRBW5AtEt
-    Io4egi68ph8IChAt/TGFvikW7tbEgK9ACD/RAfl+LiuhxqJJFtC1LfGfHI7ntuRj
-    DjQrUy59ULoflh3iWBPtpw2ooRlSrAwaIgGt9odMECXp3BK8WLsUG9H1
-    -----END RSA PRIVATE KEY-----
-  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEtDLgXuw0GWiPR8W+SUKr34hf8FXk4Gzo61IF/oD7O1OzryKJvlbpPP+SeHvC3ZREHq4okkFEEr46qP3Vk0nEhJ7A/Z/mBfXRamM4C2L4WCGR6a2S/IDxpTFfXC4DPtja/W6/UiR9Zrm5svyglc2N5P3J4I0lYHG93jpdP1SIKzkKcMuUGNGD4A/b67k5ptQW0fsZLNWpwzOGLSouKU/5xGEqZqva1BcUQjnCfAAOHREQAX87FRk9o8Q8P1Nbt8bGN+sy3JQqpMRuL4Q+wi+V2RHQiFispePBwUW7cUohK7Lw9UwQpQzlploswj9aIyzXObH5sA6uQKxQB/6xWahx
-  cluster_domain: bm-mcp-pike-k8s-contrail.local
-  cluster_name: bm-mcp-pike-k8s-contrail
-  # compute_bond_mode: active-backup
-  compute_padding_with_zeros: 'True'
-  compute_primary_first_nic: eth1
-  compute_primary_second_nic: eth2
-  context_seed: GAZbu3hguubkeYMg15AQ1J6UuY60TAh8h0EVpNUrHWhjRS2SxRYBuxxLvVURv82m
-  control_network_netmask: 255.255.255.0
-  control_network_subnet: 10.167.8.0/24
-  control_vlan: '2410'
-  cookiecutter_template_branch: ''
-  cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
-  deploy_network_gateway: 172.17.41.2
-  deploy_network_netmask: 255.255.255.192
-  deploy_network_subnet: 172.17.41.0/26
-  deployment_type: physical
-  dns_server01: 172.17.41.2
-  dns_server02: 172.17.41.2
-  email_address: dtyzhnenko@mirantis.com
-  etcd_ssl: 'True'
-  infra_bond_mode: active-backup
-  infra_deploy_nic: eth0
-  infra_kvm01_control_address: 10.167.8.241
-  infra_kvm01_deploy_address: 172.17.41.4
-  infra_kvm01_hostname: kvm01
-  infra_kvm02_control_address: 10.167.8.242
-  infra_kvm02_deploy_address: 172.17.41.5
-  infra_kvm02_hostname: kvm02
-  infra_kvm03_control_address: 10.167.8.243
-  infra_kvm03_deploy_address: 172.17.41.6
-  infra_kvm03_hostname: kvm03
-  infra_kvm_vip_address: 10.167.8.240
-  infra_primary_first_nic: eth1
-  infra_primary_second_nic: eth2
-  kubernetes_enabled: 'True'
-  kubernetes_compute_count: 2
-  kubernetes_compute_rack01_hostname: cmp
-  kubernetes_compute_single_address_ranges: 10.167.8.103-10.167.8.104
-  kubernetes_compute_tenant_address_ranges: 10.167.8.103-10.167.8.104
-  kubernetes_network_opencontrail_enabled: 'True'
-  kubernetes_keepalived_vip_interface: br_ctl
-  kubernetes_metallb_enabled: 'False'  # Not used with opencontrail
-  metallb_addresses: 172.17.41.160-172.17.41.180
-  kubernetes_ingressnginx_enabled: 'True'
-  kubernetes_ingressnginx_controller_replicas: 2
-  local_repositories: 'False'
-  maas_deploy_address: 172.16.49.66
-  maas_deploy_range_end: 10.0.0.254
-  maas_deploy_range_start: 10.0.0.1
-  maas_deploy_vlan: '0'
-  maas_fabric_name: fabric-0
-  maas_hostname: cfg01
-  mcp_common_scripts_branch: ''
-  mcp_version: proposed
-  offline_deployment: 'False'
-  kubernetes_control_address: 10.167.8.236
-  kubernetes_control_node01_deploy_address: 172.17.41.9
-  kubernetes_control_node01_address: 10.167.8.239
-  kubernetes_control_node01_hostname: ctl01
-  kubernetes_control_node02_deploy_address: 172.17.41.10
-  kubernetes_control_node02_address: 10.167.8.238
-  kubernetes_control_node02_hostname: ctl02
-  kubernetes_control_node03_deploy_address: 172.17.41.11
-  kubernetes_control_node03_address: 10.167.8.237
-  kubernetes_control_node03_hostname: ctl03
-  linux_repo_contrail_component: oc40
-  opencontrail_analytics_hostname: ctl
-  opencontrail_analytics_node01_hostname: ctl01
-  opencontrail_analytics_node02_hostname: ctl02
-  opencontrail_analytics_node03_hostname: ctl03
-  opencontrail_analytics_address: ${_param:opencontrail_control_address}
-  opencontrail_analytics_node01_address: ${_param:opencontrail_control_node01_address}
-  opencontrail_analytics_node02_address: ${_param:opencontrail_control_node02_address}
-  opencontrail_analytics_node03_address: ${_param:opencontrail_control_node03_address}
-  opencontrail_compute_iface_mask: '24'
-  opencontrail_compute_iface: ens11f1
-  opencontrail_control_address: 10.167.8.236
-  opencontrail_control_hostname: ctl
-  opencontrail_control_node01_address: 10.167.8.239
-  opencontrail_control_node01_hostname: ctl01
-  opencontrail_control_node02_address: 10.167.8.238
-  opencontrail_control_node02_hostname: ctl02
-  opencontrail_control_node03_address: 10.167.8.237
-  opencontrail_control_node03_hostname: ctl03
-  opencontrail_enabled: 'True'
-  opencontrail_router01_address: 10.167.8.100
-  opencontrail_router01_hostname: rtr01
-  opencontrail_router02_address: 10.167.8.101
-  opencontrail_router02_hostname: rtr02
-  opencontrail_public_ip_range: 172.17.41.128/26
-  opencontrail_version: '4.0'
-  openstack_enabled: 'False'
-  openssh_groups: ''
-  openstack_version: pike
-  oss_enabled: 'False'
-  oss_node03_address: ${_param:stacklight_monitor_node03_address}
-  oss_notification_smtp_use_tls: 'False'
-  oss_pushkin_email_sender_password: password
-  oss_pushkin_smtp_host: 127.0.0.1
-  oss_pushkin_smtp_port: '587'
-  oss_webhook_app_id: '24'
-  oss_webhook_login_id: '13'
-  platform: kubernetes_enabled
-  public_host: ${_param:infra_config_address}
-  publication_method: email
-  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
-  salt_api_password: frJMLJsfGkFXCk4qTTEttKla518Akvdp
-  salt_api_password_hash: $6$ixefPtcd$vasKuySO6L2JM0FKaB/udsQvH4upI2dWoJZaR9XTqeAx4UMvkHsNNSwsALVqTTLbXIngkFqYNXpbNm1o4iMGS.
-  salt_master_address: 172.17.41.3
-  salt_master_hostname: cfg01
-  salt_master_management_address: 172.17.41.3
-  shared_reclass_branch: ''
-  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
-  stacklight_enabled: 'True'
-  stacklight_log_address: 10.167.8.60
-  stacklight_log_hostname: log
-  stacklight_log_node01_address: 10.167.8.61
-  stacklight_log_node01_hostname: log01
-  stacklight_log_node02_address: 10.167.8.62
-  stacklight_log_node02_hostname: log02
-  stacklight_log_node03_address: 10.167.8.63
-  stacklight_log_node03_hostname: log03
-  stacklight_monitor_address: 10.167.8.70
-  stacklight_monitor_hostname: mon
-  stacklight_monitor_node01_address: 10.167.8.71
-  stacklight_monitor_node01_hostname: mon01
-  stacklight_monitor_node02_address: 10.167.8.72
-  stacklight_monitor_node02_hostname: mon02
-  stacklight_monitor_node03_address: 10.167.8.73
-  stacklight_monitor_node03_hostname: mon03
-  stacklight_telemetry_address: 10.167.8.85
-  stacklight_telemetry_hostname: mtr
-  stacklight_telemetry_node01_address: 10.167.8.86
-  stacklight_telemetry_node01_hostname: mtr01
-  stacklight_telemetry_node02_address: 10.167.8.87
-  stacklight_telemetry_node02_hostname: mtr02
-  stacklight_telemetry_node03_address: 10.167.8.88
-  stacklight_telemetry_node03_hostname: mtr03
-  stacklight_version: '2'
-  static_ips_on_deploy_network_enabled: 'False'
-  tenant_network_gateway: 10.167.8.1
-  tenant_network_netmask: 255.255.255.0
-  tenant_network_subnet: 10.167.8.0/24
-  tenant_vlan: '2410'
-  upstream_proxy_enabled: 'False'
-  use_default_network_scheme: 'True'
-  vnf_onboarding_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-environment.yaml
deleted file mode 100644
index 206dead..0000000
--- a/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-environment.yaml
+++ /dev/null
@@ -1,108 +0,0 @@
-nodes:
-    mon01.bm-k8s-contrail.local:
-      reclass_storage_name: stacklight_server_node01
-      roles:
-      - stacklightv2_server_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    mon02.bm-k8s-contrail.local:
-      reclass_storage_name: stacklight_server_node02
-      roles:
-      - stacklightv2_server
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    mon03.bm-k8s-contrail.local:
-      reclass_storage_name: stacklight_server_node03
-      roles:
-      - stacklightv2_server
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    mtr01.bm-k8s-contrail.local:
-      reclass_storage_name: stacklight_telemetry_node01
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    mtr02.bm-k8s-contrail.local:
-      reclass_storage_name: stacklight_telemetry_node02
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    mtr03.bm-k8s-contrail.local:
-      reclass_storage_name: stacklight_telemetry_node03
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    log01.bm-k8s-contrail.local:
-      reclass_storage_name: stacklight_log_node01
-      roles:
-      - stacklight_log_leader_v2
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    log02.bm-k8s-contrail.local:
-      reclass_storage_name: stacklight_log_node02
-      roles:
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    log03.bm-k8s-contrail.local:
-      reclass_storage_name: stacklight_log_node03
-      roles:
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    cid01.bm-k8s-contrail.local:
-      reclass_storage_name: cicd_control_node01
-      roles:
-      - cicd_control_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    cid02.bm-k8s-contrail.local:
-      reclass_storage_name: cicd_control_node02
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    cid03.bm-k8s-contrail.local:
-      reclass_storage_name: cicd_control_node03
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-k8s-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/salt.yaml
deleted file mode 100644
index 274fb44..0000000
--- a/tcp_tests/templates/cookied-bm-k8s-contrail/salt.yaml
+++ /dev/null
@@ -1,199 +0,0 @@
-{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-k8s-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-bm-k8s-contrail/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
-{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
-{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','bm-mcp-pike-k8s-contrail') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-k8s-contrail.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-upgrade-physical-inventory.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2410') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2411') %}
-
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN, CLUSTER_PRODUCT_MODELS='cicd infra kubernetes opencontrail stacklight2') }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-- description: "Change path to internal storage for salt.control images"
-  cmd: |
-    set -e;
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
-    reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Delete proxy inclusion from kvm
-  cmd: |
-    sed -i 's/- system.salt.control.cluster.kubernetes_proxy_cluster//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Temporary workaround for correct bridge names according to the environment templates
-  cmd: |
-    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: "Excluding tenant network from cluster"
-  cmd: |
-    set -e;
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools add-key parameters._param.opencontrail_compute_address '${_param:single_address}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/opencontrail/compute.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: "Use correct compute interface"
-  cmd: |
-    set -e;
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools add-key parameters._param.opencontrail_compute_iface 'ens11f1.${_param:control_vlan}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/opencontrail/init.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Rerun openssh after env model is generated
-  cmd: |
-    salt-call state.sls openssh
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-- description: "Disable kubelet_fail_on_swap"
-  cmd: |
-    set -e;
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools add-key parameters._param.kubelet_fail_on_swap false /srv/salt/reclass/classes/system/kubernetes/common.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Update minion information
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
-    salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
-    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Rerun openssh after env model is generated
-  cmd: |
-    salt-call state.sls openssh
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.network.host
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-########################################
-# Spin up Control Plane VMs on KVM nodes
-########################################
-
-- description: Execute 'libvirt' states to create necessary libvirt networks
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Create VMs for control plane
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
-
-- description: '*Workaround* to wait for the control-plane VMs to appear in salt-key (instead of a sleep)'
-  cmd: |
-    salt-key -l acc| sort > /tmp/current_keys.txt &&
-    salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 20, delay: 30}
-  skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-#########################################
-# Configure all running salt minion nodes
-#########################################
-
-- description: Hack resolv.conf on VCP nodes for internal services access
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.17.41.2' > /etc/resolv.conf;"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Refresh pillars on all minions
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Sync all salt resources
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Show reclass-salt --top for generated nodes
-  cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: "Lab04 workaround: Give each node root acces with key from cfg01"
-  cmd: |
-    set -e;
-    set -x;
-    key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
-    salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
-    salt '*' cmd.run "service sshd restart"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: "Lab04 workaround: Control network access from cfg01 node using sshuttle via kvm01"
-  cmd: |
-    set -e;
-    set -x;
-    KVM01_DEPLOY_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:infra_kvm_node01_deploy_address);
-    apt-get install -y sshuttle;
-    sshuttle -r ${KVM01_DEPLOY_ADDRESS} 10.167.8.0/24 -D >/dev/null;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-k8s-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/sl.yaml
deleted file mode 100644
index cb929e4..0000000
--- a/tcp_tests/templates/cookied-bm-k8s-contrail/sl.yaml
+++ /dev/null
@@ -1,267 +0,0 @@
-{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-# Install docker swarm
-- description: Configure docker service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install docker swarm on master node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Send grains to the swarm slave nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Update mine
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Refresh modules
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Rerun swarm on slaves to ensure proper token population
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Configure slave nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: List registered Docker swarm nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install keepalived on mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'mon*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check the VIP on mon nodes
-  cmd: |
-    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
-    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install glusterfs client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:client' state.sls glusterfs.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 15}
-  skip_fail: false
-
-# Install slv2 infra
-# Launch containers
-- description: Install Mongo if target matches
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install Mongo cluster if target matches
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 20}
-  skip_fail: false
-
-- description: Install telegraf
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install elasticsearch server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true and *01*' state.sls elasticsearch.server
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install elasticsearch server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true' state.sls elasticsearch.server
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install kibana server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true and *01*' state.sls kibana.server
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install kibana server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true' state.sls kibana.server
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install elasticsearch client
-  cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Install kibana client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check influxdb
-  cmd: |
-    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
-    if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server and *01*' state.sls influxdb;
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb;
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-# Install service for the log collection
-- description: Configure fluentd
-  cmd: |
-    FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "Fluentd service presence: ${FLUENTD_SERVICE}";
-    if [[ "$FLUENTD_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
-    else
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-# Install heka ceilometer collector
-- description: Install heka ceilometer collector if it exists
-  cmd: |
-    CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "Ceilometer service presence: ${CEILO}";
-    if [[ "$CEILO" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-  ######################################
-  ######################################
-  ######################################
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' mysql.status
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Check Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' mysql.status
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Collect Grains
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Configure prometheus server on docker swarm nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Launch prometheus containers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Check docker ps
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 60}
-  skip_fail: false
-
-- description: Configure Alerta if it exists
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Run salt minion to create cert files
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion.cert
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-{{  SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{  SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 16bd9f6..0000000
--- a/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,78 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - sed -i -e '/^PasswordAuthentication/s/^.*$/PasswordAuthentication yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifdown ens3
-   - sudo ip r d default || true  # remove existing default route to get it from dhcp
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   - echo "nameserver 172.17.41.2" > /etc/resolv.conf;
-
-   - mkdir -p /srv/salt/reclass/nodes
-   - systemctl enable salt-master
-   - systemctl enable salt-minion
-   - systemctl start salt-master
-   - systemctl start salt-minion
-   - salt-call -l info --timeout=120 test.ping
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604-hwe-compute.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604-hwe-compute.yaml
deleted file mode 100644
index dd34ede..0000000
--- a/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604-hwe-compute.yaml
+++ /dev/null
@@ -1,130 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   #   # Block access to SSH while node is preparing
-   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   # - sed -i -e '/^PasswordAuthentication/s/^.*$/PasswordAuthentication yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - if lvs vg0; then pvresize /dev/vda3; fi
-   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifup {interface_name}
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   #   - fallocate -l 4G /swapfile
-   #   - chmod 600 /swapfile
-   #   - mkswap /swapfile
-   #   - swapon /swapfile
-   #   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
-   - echo "nameserver 172.17.41.2" > /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   # Install latest kernel
-   #   - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
-
-   # Register compute node in salt master
-   # - salt-call event.send "reclass/minion/classify" "{{ "{{" }}\"node_master_ip\": \"{{ ETH0_IP_ADDRESS_CFG01 }}\", \"node_os\": \"xenial\", \"node_domain\": \"{{ DOMAIN_NAME }}\", \"node_cluster\": \"{{ LAB_CONFIG_NAME }}\"{{ "}}" }}"
-
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #- echo "Allow SSH access ..."
-   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   #   - reboot
-   ########################################################
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          # The loopback network interface
-          auto lo
-          iface lo inet loopback
-          auto {interface_name}
-          iface {interface_name} inet dhcp
-
-   - path: /usr/share/growlvm/image-layout.yml
-     content: |
-       root:
-         size: '30%VG'
-       home:
-         size: '1G'
-       var_log:
-         size: '11%VG'
-       var_log_audit:
-         size: '5G'
-       var_tmp:
-         size: '11%VG'
-       tmp:
-         size: '5G'
-     owner: root:root
-
-  growpart:
-    mode: auto
-    devices:
-      - '/'
-      - '/dev/vda3'
-    ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604.yaml
deleted file mode 100644
index 51fbc96..0000000
--- a/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,121 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block access to SSH while node is preparing
-   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - if lvs vg0; then pvresize /dev/vda3; fi
-   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifup {interface_name}
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   #   - fallocate -l 4G /swapfile
-   #   - chmod 600 /swapfile
-   #   - mkswap /swapfile
-   #   - swapon /swapfile
-   #   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
-   - echo "nameserver 172.17.41.2" > /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #   - echo "Allow SSH access ..."
-   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          # The loopback network interface
-          auto lo
-          iface lo inet loopback
-          auto {interface_name}
-          iface {interface_name} inet dhcp
-
-   - path: /usr/share/growlvm/image-layout.yml
-     content: |
-       root:
-         size: '30%VG'
-       home:
-         size: '1G'
-       var_log:
-         size: '11%VG'
-       var_log_audit:
-         size: '5G'
-       var_tmp:
-         size: '11%VG'
-       tmp:
-         size: '5G'
-     owner: root:root
-
-  growpart:
-    mode: auto
-    devices:
-      - '/'
-      - '/dev/vda3'
-    ignore_growroot_disabled: false
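The runcmd above only calls /usr/bin/growlvm.py when the vg0 volume group exists, feeding it the image-layout.yml written below it. As a rough illustration of the layout contract (not the real growlvm.py implementation), each entry maps a logical volume to either a percentage of the VG or an absolute size:

    # Hypothetical sketch of consuming image-layout.yml: turn each size
    # spec into an lvextend invocation. This is NOT the real growlvm.py,
    # only an illustration of the '%VG' vs absolute-size convention.
    import yaml

    layout = yaml.safe_load("root: {size: '30%VG'}\nhome: {size: '1G'}\n")
    for lv, spec in layout.items():
        size = spec["size"]
        flag = "-l" if size.endswith("%VG") else "-L"  # extents vs absolute size
        print(f"lvextend {flag} {size} /dev/vg0/{lv}")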
diff --git a/tcp_tests/templates/cookied-bm-k8s-contrail/underlay.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay.yaml
deleted file mode 100644
index 089f343..0000000
--- a/tcp_tests/templates/cookied-bm-k8s-contrail/underlay.yaml
+++ /dev/null
@@ -1,544 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-
-#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-k8s-contrail') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
-{% set ETH0_IP_ADDRESS_CFG01 = os_env('ETH0_IP_ADDRESS_CFG01', '172.17.41.3') %}
-{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.17.41.4') %}
-{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.17.41.5') %}
-{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.17.41.6') %}
-{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.17.41.7') %}
-{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.17.41.8') %}
-{% set ETH0_IP_ADDRESS_CTL01 = os_env('ETH0_IP_ADDRESS_CTL01', '172.17.41.9') %}
-{% set ETH0_IP_ADDRESS_CTL02 = os_env('ETH0_IP_ADDRESS_CTL02', '172.17.41.10') %}
-{% set ETH0_IP_ADDRESS_CTL03 = os_env('ETH0_IP_ADDRESS_CTL03', '172.17.41.11') %}
-{% import 'cookied-bm-k8s-contrail/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-bm-k8s-contrail/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-bm-k8s-contrail/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
-{% import 'cookied-bm-k8s-contrail/underlay--user-data1604-hwe-compute.yaml' as CLOUDINIT_USER_DATA_HWE_CMP with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
- - &cloudinit_user_data_hwe_cmp {{ CLOUDINIT_USER_DATA_HWE_CMP }}
-
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'cookied-bm-k8s-contrail_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.17.41.0/26:26') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +61
-            default_{{ HOSTNAME_CFG01 }}: {{ ETH0_IP_ADDRESS_CFG01 }}
-            default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
-            default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
-            default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
-            default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
-            default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
-            default_{{ HOSTNAME_CTL01 }}: {{ ETH0_IP_ADDRESS_CTL01 }}
-            default_{{ HOSTNAME_CTL02 }}: {{ ETH0_IP_ADDRESS_CTL02 }}
-            default_{{ HOSTNAME_CTL03 }}: {{ ETH0_IP_ADDRESS_CTL03 }}
-
-            virtual_{{ HOSTNAME_CFG01 }}: {{ ETH0_IP_ADDRESS_CFG01 }}
-            virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
-            virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
-            virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
-            virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
-            virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
-            virtual_{{ HOSTNAME_CTL01 }}: {{ ETH0_IP_ADDRESS_CTL01 }}
-            virtual_{{ HOSTNAME_CTL02 }}: {{ ETH0_IP_ADDRESS_CTL02 }}
-            virtual_{{ HOSTNAME_CTL03 }}: {{ ETH0_IP_ADDRESS_CTL03 }}
-          #ip_ranges:
-          #    dhcp: [+2, -4]
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.13.0/24:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.14.0/24:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.41.128/26:26') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: -2
-
-    groups:
-
-      - name: virtual
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-
-        network_pools:
-          admin: admin-pool01
-
-        l2_network_devices:
-          # Ironic management interface
-          admin:
-            address_pool: admin-pool01
-            dhcp: false
-            parent_iface:
-              phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
-
-        group_volumes:
-         - name: cloudimage1604    # This name is used for the 'backing_store' option of node volumes.
-           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           format: qcow2
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_cfg01
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
-                #- label: ens4
-                #  l2_network_device: private
-                #  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                #ens4:
-                #  networks:
-                #    - private
-
-
-      - name: default
-        driver:
-          name: devops_driver_ironic
-          params:
-            os_auth_token: fake-token
-            ironic_url: !os_env IRONIC_URL  # URL that will be used by fuel-devops
-                                            # to access Ironic API
-            # Agent URL that is accessible from the deploying node when nodes
-            # are bootstrapped with PXE. Usually the PXE/provision network address is used.
-            agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
-            agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
-
-        network_pools:
-          admin: admin-pool01
-
-        nodes:
-
-          - name: {{ HOSTNAME_KVM01 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_KVM01  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: enp9s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
-                - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
-
-              network_config:
-                enp9s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp9s0f1
-
-          - name: {{ HOSTNAME_KVM02 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_KVM02  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: enp9s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
-                - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
-
-              network_config:
-                enp9s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp9s0f1
-
-          - name: {{ HOSTNAME_KVM03 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_KVM03  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              # cloud_init_iface_up: eno1  # see 'interfaces' below.
-              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                # - label: eno1
-                - label: enp9s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
-                # - label: eno2
-                - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
-
-              network_config:
-                # eno1:
-                enp9s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp9s0f1
-
-          - name: {{ HOSTNAME_CTL01 }}
-            role: k8s_controller
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CTL01  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: enp2s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CTL01
-                - label: enp2s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CTL01
-
-              network_config:
-                enp2s0f0:
-                  networks:
-                   - admin
-
-          - name: {{ HOSTNAME_CTL02 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD_CTL
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CTL02  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: enp2s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CTL02
-                - label: enp2s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CTL02
-
-              network_config:
-                enp2s0f0:
-                  networks:
-                   - admin
-
-          - name: {{ HOSTNAME_CTL03 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD_CTL
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CTL03  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              # cloud_init_iface_up: eno1  # see 'interfaces' below.
-              cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                # - label: eno1
-                - label: enp2s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CTL03
-                # - label: eno2
-                - label: enp2s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CTL03
-
-              network_config:
-                # eno1:
-                enp2s0f0:
-                  networks:
-                   - admin
-
-          - name: {{ HOSTNAME_CMP001 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP001  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              # cloud_init_iface_up: enp3s0f0  # see 'interfaces' below.
-              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_hwe_cmp
-
-              interfaces:
-                - label: enp9s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
-                - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
-                # - label: enp5s0f0
-                #   mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
-                #   features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
-                # - label: enp5s0f1
-                #   mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
-                #   features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
-                # - label: enp5s0f2
-                #   mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
-                #   features: ['dpdk', 'dpdk_pci: 0000:05:00.2']
-
-              network_config:
-                enp9s0f0:
-                  networks:
-                   - admin
-
-          - name: {{ HOSTNAME_CMP002 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP002  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              # cloud_init_iface_up: enp3s0f0  # see 'interfaces' below.
-              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_hwe_cmp
-
-              interfaces:
-                - label: enp9s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
-                - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
-              network_config:
-                enp9s0f0:
-                  networks:
-                   - admin
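Almost every value in the underlay template above goes through os_env(NAME, default), so the whole topology can be overridden from the environment. The helper itself is provided by tcp-qa's template rendering machinery; an illustrative stand-in (not the actual implementation) might look like:

    # Illustrative equivalent of the os_env() Jinja helper used throughout
    # the underlay templates: environment variable with an optional default.
    import os

    def os_env(var_name, default=None):
        value = os.environ.get(var_name, default)
        if value is None:
            raise KeyError(f"Environment variable {var_name!r} is not set")
        return value

    print(os_env("REPOSITORY_SUITE", "proposed"))  # -> 'proposed' unless overridden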
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
deleted file mode 100644
index a3de973..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
deleted file mode 100644
index 1d8cbbf..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW02 with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
-
-- description: Install cinder volume
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@cinder:volume' state.sls cinder
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=true) }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
-
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
deleted file mode 100644
index 7585c41..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
+++ /dev/null
@@ -1,198 +0,0 @@
-default_context:
-  mcp_version: proposed
-  ceph_enabled: 'False'
-  cicd_enabled: 'False'
-  cicd_control_node01_address: 10.167.4.91
-  cicd_control_node01_hostname: cid01
-  cicd_control_node02_address: 10.167.4.92
-  cicd_control_node02_hostname: cid02
-  cicd_control_node03_address: 10.167.4.93
-  cicd_control_node03_hostname: cid03
-  cicd_control_vip_address: 10.167.4.90
-  cicd_control_vip_hostname: cid
-  kqueen_custom_mail_enabled: 'False'
-  kqueen_enabled: 'False'
-  cluster_domain: cookied-bm-mcp-dvr-vxlan.local
-  cluster_name: cookied-bm-mcp-dvr-vxlan
-  compute_bond_mode: active-backup
-  compute_primary_first_nic: eth1
-  compute_primary_second_nic: eth2
-  context_seed: Psupdi5ne1kCk31iDWV7fhbHnBALIr3SWhce7Z01jCaMwlAhGKxeLPFPQ9CgYzJD
-  control_network_netmask: 255.255.255.0
-  control_network_subnet: 10.167.4.0/24
-  control_vlan: '2404'
-  cookiecutter_template_branch: proposed
-  cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
-  deploy_network_gateway: 172.16.164.1
-  deploy_network_netmask: 255.255.255.192
-  deploy_network_subnet: 172.16.164.0/26
-  deployment_type: physical
-  dns_server01: 172.18.176.6
-  dns_server02: 172.18.208.44
-  email_address: obutenko@mirantis.com
-  gateway_primary_first_nic: eth1
-  gateway_primary_second_nic: eth2
-  infra_bond_mode: active-backup
-  infra_deploy_nic: eth0
-  infra_kvm01_control_address: 10.167.4.241
-  infra_kvm01_deploy_address: 172.16.164.11
-  infra_kvm01_hostname: kvm01
-  infra_kvm02_control_address: 10.167.4.242
-  infra_kvm02_deploy_address: 172.16.164.12
-  infra_kvm02_hostname: kvm02
-  infra_kvm03_control_address: 10.167.4.243
-  infra_kvm03_deploy_address: 172.16.164.13
-  infra_kvm03_hostname: kvm03
-  infra_kvm_vip_address: 10.167.4.240
-  infra_primary_first_nic: eth1
-  infra_primary_second_nic: eth2
-  kubernetes_enabled: 'False'
-  local_repositories: 'False'
-  maas_deploy_address: 172.16.164.14
-  maas_hostname: cfg01
-  offline_deployment: 'False'
-  opencontrail_enabled: 'False'
-  openldap_enabled: 'False'
-  bmk_enabled: 'False'
-  static_ips_on_deploy_network_enabled: 'False'
-  openstack_benchmark_node01_address: 10.167.4.95
-  openstack_benchmark_node01_hostname: bmk01
-  openstack_compute_count: '2'
-  openstack_compute_rack01_hostname: cmp
-  openstack_compute_rack01_single_subnet: 172.16.10
-  openstack_compute_rack01_tenant_subnet: 10.1.0
-  openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
-  openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
-  openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_node01_hostname: cmp01
-  openstack_compute_node02_hostname: cmp02
-  openstack_compute_node01_address: 10.167.4.3
-  openstack_compute_node02_address: 10.167.4.31
-  openstack_compute_node01_single_address: 10.167.4.3
-  openstack_compute_node02_single_address: 10.167.4.31
-  openstack_compute_node01_deploy_address: 172.16.164.3
-  openstack_compute_node02_deploy_address: 172.16.164.31
-  openstack_control_address: 10.167.4.10
-  openstack_control_hostname: ctl
-  openstack_control_node01_address: 10.167.4.11
-  openstack_control_node01_hostname: ctl01
-  openstack_control_node02_address: 10.167.4.12
-  openstack_control_node02_hostname: ctl02
-  openstack_control_node03_address: 10.167.4.13
-  openstack_control_node03_hostname: ctl03
-  openstack_database_address: 10.167.4.50
-  openstack_database_hostname: dbs
-  openstack_database_node01_address: 10.167.4.51
-  openstack_database_node01_hostname: dbs01
-  openstack_database_node02_address: 10.167.4.52
-  openstack_database_node02_hostname: dbs02
-  openstack_database_node03_address: 10.167.4.53
-  openstack_database_node03_hostname: dbs03
-  openstack_enabled: 'True'
-  openstack_gateway_node01_address: 10.167.4.224
-  openstack_gateway_node01_hostname: gtw01
-  openstack_gateway_node01_tenant_address: 10.167.6.4
-  openstack_gateway_node02_address: 10.167.4.225
-  openstack_gateway_node02_hostname: gtw02
-  openstack_gateway_node02_tenant_address: 10.167.6.5
-  openstack_message_queue_address: 10.167.4.40
-  openstack_message_queue_hostname: msg
-  openstack_message_queue_node01_address: 10.167.4.41
-  openstack_message_queue_node01_hostname: msg01
-  openstack_message_queue_node02_address: 10.167.4.42
-  openstack_message_queue_node02_hostname: msg02
-  openstack_message_queue_node03_address: 10.167.4.43
-  openstack_message_queue_node03_hostname: msg03
-  openstack_network_engine: ovs
-  openstack_neutron_qos: 'False'
-  openstack_neutron_vlan_aware_vms: 'False'
-  openstack_nfv_dpdk_enabled: 'False'
-  openstack_nfv_sriov_enabled: 'False'
-  openstack_nova_compute_nfv_req_enabled: 'False'
-  openstack_ovs_dvr_enabled: 'True'
-  openstack_ovs_encapsulation_type: vxlan
-  openstack_proxy_address: 10.167.4.80
-  openstack_proxy_hostname: prx
-  openstack_proxy_node01_address: 10.167.4.81
-  openstack_proxy_node01_hostname: prx01
-  openstack_proxy_node02_address: 10.167.4.82
-  openstack_proxy_node02_hostname: prx02
-  openstack_version: pike
-  cinder_version: ${_param:openstack_version}
-  oss_enabled: 'False'
-  platform: openstack_enabled
-  public_host: ${_param:openstack_proxy_address}
-  publication_method: email
-  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
-  backup_private_key: |
-      -----BEGIN RSA PRIVATE KEY-----
-      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
-      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
-      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
-      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
-      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
-      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
-      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
-      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
-      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
-      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
-      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
-      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
-      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
-      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
-      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
-      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
-      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
-      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
-      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
-      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
-      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
-      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
-      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
-      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
-      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
-      -----END RSA PRIVATE KEY-----
-  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
-  salt_master_address: 10.167.4.2
-  salt_master_hostname: cfg01
-  salt_master_management_address: 172.16.164.2
-  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
-  stacklight_enabled: 'False'
-  fluentd_enabled: 'False'
-  stacklight_log_address: 10.167.4.60
-  stacklight_log_hostname: log
-  stacklight_log_node01_address: 10.167.4.61
-  stacklight_log_node01_hostname: log01
-  stacklight_log_node02_address: 10.167.4.62
-  stacklight_log_node02_hostname: log02
-  stacklight_log_node03_address: 10.167.4.63
-  stacklight_log_node03_hostname: log03
-  stacklight_monitor_address: 10.167.4.70
-  stacklight_monitor_hostname: mon
-  stacklight_monitor_node01_address: 10.167.4.71
-  stacklight_monitor_node01_hostname: mon01
-  stacklight_monitor_node02_address: 10.167.4.72
-  stacklight_monitor_node02_hostname: mon02
-  stacklight_monitor_node03_address: 10.167.4.73
-  stacklight_monitor_node03_hostname: mon03
-  stacklight_notification_address: alerts@localhost
-  stacklight_notification_smtp_host: 127.0.0.1
-  stacklight_telemetry_address: 10.167.4.85
-  stacklight_telemetry_hostname: mtr
-  stacklight_telemetry_node01_address: 10.167.4.86
-  stacklight_telemetry_node01_hostname: mtr01
-  stacklight_telemetry_node02_address: 10.167.4.87
-  stacklight_telemetry_node02_hostname: mtr02
-  stacklight_telemetry_node03_address: 10.167.4.88
-  stacklight_telemetry_node03_hostname: mtr03
-  stacklight_version: '2'
-  tenant_network_gateway: 10.167.6.1
-  tenant_network_netmask: 255.255.255.0
-  tenant_network_subnet: 10.167.6.0/24
-  stacklight_long_term_storage_type: prometheus
-  prometheus_relay_bind_port: 9094
-  tenant_vlan: '2406'
-  upstream_proxy_enabled: 'False'
-  use_default_network_scheme: 'True'
-  vnf_onboarding_enabled: 'False'
\ No newline at end of file
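The default_context above is what cookiecutter consumes when generating the cluster model from cookiecutter-templates. A minimal sketch of driving that non-interactively; the local template path and the context subset are illustrative only, not the exact pipeline invocation:

    # Sketch: render a cluster model from a cookiecutter template using a
    # default_context like the one above. Template path and context keys
    # are assumed examples.
    from cookiecutter.main import cookiecutter

    context = {
        "cluster_name": "cookied-bm-mcp-dvr-vxlan",
        "openstack_version": "pike",
        "openstack_ovs_encapsulation_type": "vxlan",
    }
    cookiecutter("./cookiecutter-templates", no_input=True, extra_context=context)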
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
deleted file mode 100644
index 720c275..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
+++ /dev/null
@@ -1,97 +0,0 @@
-nodes:
-    cfg01.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: infra_config_node01
-      roles:
-      - infra_config
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_static_ctl
-
-    # Physical nodes
-    kvm01.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: infra_kvm_node01
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        enp9s0f0:
-          role: single_mgm_dhcp
-        enp9s0f1:
-          role: bond0_ab_ovs_vlan_ctl
-
-    kvm02.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: infra_kvm_node02
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        enp9s0f0:
-          role: single_mgm_dhcp
-        enp9s0f1:
-          role: bond0_ab_ovs_vlan_ctl
-
-    kvm03.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: infra_kvm_node03
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        enp9s0f0:
-          role: single_mgm_dhcp
-        enp9s0f1:
-          role: bond0_ab_ovs_vlan_ctl
-
-    cmp001.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: openstack_compute_node01
-      roles:
-      - openstack_compute
-      - features_lvm_backend_volume_sdb
-      - linux_system_codename_xenial
-      interfaces:
-        enp9s0f0:
-          role: single_mgm_dhcp
-        enp9s0f1:
-          role: bond0_ab_dvr_vxlan_ctl_mesh_floating
-
-    cmp002.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: openstack_compute_node02
-      roles:
-      - openstack_compute
-      - features_lvm_backend_volume_sdb
-      - linux_system_codename_xenial
-      interfaces:
-        enp9s0f0:
-          role: single_mgm_dhcp
-        enp9s0f1:
-          role: bond0_ab_dvr_vxlan_ctl_mesh_floating
-
-    gtw01.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: openstack_gateway_node01
-      roles:
-      - openstack_gateway
-      - linux_system_codename_xenial
-      classes:
-      - system.linux.system.repo.mcp.apt_mirantis.docker
-      interfaces:
-        enp2s0f0:
-          role: single_mgm_dhcp
-        enp2s0f1:
-          role: bond0_ab_dvr_vxlan_ctl_mesh_external
-          external_address: 172.17.42.4
-          external_network_netmask: 255.255.255.192
-
-    gtw02.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: openstack_gateway_node02
-      roles:
-      - openstack_gateway
-      - linux_system_codename_xenial
-      interfaces:
-        enp2s0f0:
-          role: single_mgm_dhcp
-        enp2s0f1:
-          role: bond0_ab_dvr_vxlan_ctl_mesh_external
-          external_address: 172.17.42.5
-          external_network_netmask: 255.255.255.192
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
deleted file mode 100644
index 6cace03..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
+++ /dev/null
@@ -1,124 +0,0 @@
-nodes:
-    ctl01.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: openstack_control_node01
-      roles:
-      - openstack_control_leader
-      - linux_system_codename_xenial
-      - features_lvm_backend_control
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    ctl02.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: openstack_control_node02
-      roles:
-      - openstack_control
-      - linux_system_codename_xenial
-      - features_lvm_backend_control
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    ctl03.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: openstack_control_node03
-      roles:
-      - openstack_control
-      - linux_system_codename_xenial
-      - features_lvm_backend_control
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    dbs01.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: openstack_database_node01
-      roles:
-      - openstack_database_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    dbs02.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: openstack_database_node02
-      roles:
-      - openstack_database
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    dbs03.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: openstack_database_node03
-      roles:
-      - openstack_database
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    msg01.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: openstack_message_queue_node01
-      roles:
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    msg02.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: openstack_message_queue_node02
-      roles:
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    msg03.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: openstack_message_queue_node03
-      roles:
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    prx01.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: openstack_proxy_node01
-      roles:
-      - openstack_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    prx02.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: openstack_proxy_node02
-      roles:
-      - openstack_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
deleted file mode 100644
index 8804721..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
+++ /dev/null
@@ -1,97 +0,0 @@
-{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_KVM01 with context %}
-{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_KVM02 with context %}
-{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_KVM03 with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-bm-mcp-dvr-vxlan') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-- description: Rerun the openssh state after the environment model is generated
-  cmd: |
-    salt-call state.sls openssh
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-########################################
-# Spin up Control Plane VMs on KVM nodes
-########################################
-
-- description: Hack resolv.conf on nodes for internal service access
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Execute 'libvirt' states to create necessary libvirt networks
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Temporary workaround for correct bridge names according to the environment templates
-  cmd: |
-    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
-    salt '*' saltutil.refresh_pillar;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Create VMs for control plane
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
-
-- description: '*Workaround*: wait until the control-plane VMs appear in salt-key (instead of a sleep)'
-  cmd: |
-    salt-key -l acc| sort > /tmp/current_keys.txt &&
-    salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 20, delay: 30}
-  skip_fail: false
-
-#########################################
-# Configure all running salt minion nodes
-#########################################
-
-- description: Refresh pillars on all minions
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Sync all salt resources
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Show reclass-salt --top for the generated nodes
-  cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
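Note: the '*Workaround*' step above polls salt-key rather than sleeping for a fixed interval. As a standalone shell sketch of the same check (the retry budget mirrors the step's count/delay; the VM names are whatever 'virsh list --name' reports on the kvm hosts):

    # Poll until every VM defined on the kvm* hosts has had its key
    # accepted by the Salt master (up to 20 attempts, 30s apart).
    for attempt in $(seq 1 20); do
        salt-key -l acc | sort > /tmp/current_keys.txt
        if salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm' | sort \
            | xargs -I {} fgrep {} /tmp/current_keys.txt; then
            echo "All control-plane VM keys are accepted"; break
        fi
        sleep 30
    done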
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
deleted file mode 100644
index b77550a..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifdown ens3
-   - sudo ip r d default || true  # remove existing default route to get it from dhcp
-   - sudo ifup ens3
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
-   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-
-   - mkdir -p /srv/salt/reclass/nodes
-   - systemctl enable salt-master
-   - systemctl enable salt-minion
-   - systemctl start salt-master
-   - systemctl start salt-minion
-   - salt-call -l info --timeout=120 test.ping
-
-  write_files:
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
\ No newline at end of file
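Note: after cfg01 boots with this user-data, the result can be spot-checked from the node itself. A minimal sketch, assuming an image whose cloud-init provides the 'status' subcommand:

    cloud-init status --wait                      # block until cloud-init finishes
    swapon --show                                 # /swapfile (4G) should be active
    tail -n1 /var/log/cloud-init-output.log       # output is teed here per 'output:'
    salt-call -l info --timeout=120 test.ping     # master and minion came up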
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
deleted file mode 100644
index 4983612..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
+++ /dev/null
@@ -1,126 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   #   # Block access to SSH while node is preparing
-   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - if lvs vg0; then pvresize /dev/vda3; fi
-   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifup {interface_name}
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
-   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   #   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   # Install latest kernel
-   #   - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #- echo "Allow SSH access ..."
-   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   #   - reboot
-   ########################################################
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          # The loopback network interface
-          auto lo
-          iface lo inet loopback
-          auto {interface_name}
-          iface {interface_name} inet dhcp
-
-   - path: /usr/share/growlvm/image-layout.yml
-     content: |
-       root:
-         size: '30%VG'
-       home:
-         size: '1G'
-       var_log:
-         size: '11%VG'
-       var_log_audit:
-         size: '5G'
-       var_tmp:
-         size: '11%VG'
-       tmp:
-         size: '5G'
-     owner: root:root
-
-  growpart:
-    mode: auto
-    devices:
-      - '/'
-      - '/dev/vda3'
-    ignore_growroot_disabled: false
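Note: the growpart/growlvm pair above first enlarges /dev/vda3, then re-lays the LVM volumes per image-layout.yml. The manual equivalent, as a sketch (vg0 and the LV name are assumed from the 'lvs vg0' guard and the layout file; growlvm.py ships in these images and is not a standard tool):

    pvresize /dev/vda3                  # let LVM see the enlarged partition
    lvextend -l +50%FREE vg0/root       # illustrative split of the new space
    resize2fs /dev/vg0/root             # grow the filesystem online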
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
deleted file mode 100644
index b39b37a..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,87 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - if lvs vg0; then pvresize /dev/vda3; fi
-   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifup {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          # The loopback network interface
-          auto lo
-          iface lo inet loopback
-          auto {interface_name}
-          iface {interface_name} inet dhcp
-
-   - path: /usr/share/growlvm/image-layout.yml
-     content: |
-       root:
-         size: '30%VG'
-       home:
-         size: '1G'
-       var_log:
-         size: '11%VG'
-       var_log_audit:
-         size: '5G'
-       var_tmp:
-         size: '11%VG'
-       tmp:
-         size: '5G'
-     owner: root:root
-
-  growpart:
-    mode: auto
-    devices:
-      - '/'
-      - '/dev/vda3'
-    ignore_growroot_disabled: false
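Note: write_files runs in an earlier cloud-init stage than runcmd, so the 'ifup {interface_name}' call above picks up the freshly written DHCP stanza. The equivalent manual sequence, with the placeholder shown as ens3 purely for illustration:

    cat > /etc/network/interfaces <<'EOF'
    # The loopback network interface
    auto lo
    iface lo inet loopback
    auto ens3
    iface ens3 inet dhcp
    EOF
    ifup ens3             # bring the interface up via DHCP
    ip -4 addr show ens3  # confirm the lease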
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
deleted file mode 100644
index 8d2bf09..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
+++ /dev/null
@@ -1,500 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-mcp-dvr-vxlan') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.164.2') %}
-{% set ETH2_IP_ADDRESS_CFG01 = os_env('ETH2_IP_ADDRESS_CFG01', '10.167.4.253') %}
-{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.164.11') %}
-{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.164.12') %}
-{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.164.13') %}
-{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.164.3') %}
-{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.164.31') %}
-{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.164.4') %}
-{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.164.5') %}
-
-{% import 'cookied-bm-mcp-dvr-vxlan/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_1604_HWE with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_1604_hwe {{ CLOUDINIT_USER_DATA_1604_HWE }}
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.164.0/26:26') }}
-        params:
-          ip_reserved:
-            gateway: +62
-            l2_network_device: +61
-            virtual_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
-            default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
-            default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
-            default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
-            default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
-            default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
-            default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
-            default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
-          ip_ranges:
-              dhcp: [+2, -4]
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.4.0/24:24') }}
-        params:
-          ip_reserved:
-            virtual_{{ HOSTNAME_CFG01 }}: {{ ETH2_IP_ADDRESS_CFG01 }}
-            gateway: +1
-            l2_network_device: +1
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.6.0/24:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.0/26:26') }}
-        params:
-          ip_reserved:
-            gateway: '172.17.42.1'
-          ip_ranges:
-              dhcp: ['172.17.42.10', '172.17.42.60']
-
-
-    groups:
-      - name: virtual
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
-        network_pools:
-          admin: admin-pool01
-
-        l2_network_devices:
-          # Ironic management interface
-          admin:
-            address_pool: admin-pool01
-            dhcp: false
-            parent_iface:
-              phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
-          private:
-            parent_iface:
-              phys_dev: !os_env CONTROL_IFACE
-
-        group_volumes:
-         - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
-           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           format: qcow2
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: iso  # The volume named 'iso' is used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_cfg01
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-
-      - name: default
-        driver:
-          name: devops_driver_ironic
-          params:
-            os_auth_token: fake-token
-            ironic_url: !os_env IRONIC_URL  # URL that will be used by fuel-devops
-                                            # to access the Ironic API
-            # Agent URLs that are accessible from the deploying node when nodes
-            # are bootstrapped with PXE. Usually a PXE/provision network address is used.
-            agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
-            agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
-
-        network_pools:
-          admin: admin-pool01
-
-        nodes:
-          - name: {{ HOSTNAME_KVM01 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_KVM01  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URLs, this is a URL to the image that is used
-                  # to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # The volume named 'iso' is used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces:
-                - label: enp9s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
-                - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
-
-              network_config:
-                enp9s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp9s0f1
-
-          - name: {{ HOSTNAME_KVM02 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_KVM02  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URLs, this is a URL to the image that is used
-                  # to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # The volume named 'iso' is used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces:
-                - label: enp9s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
-                - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
-
-              network_config:
-                enp9s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp9s0f1
-
-          - name: {{ HOSTNAME_KVM03 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_KVM03  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URLs, this is a URL to the image that is used
-                  # to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # The volume named 'iso' is used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces:
-                - label: enp9s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
-                - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
-
-              network_config:
-                enp9s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp9s0f1
-
-          - name: {{ HOSTNAME_CMP001 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP001  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URLs, this is a URL to the image that is used
-                  # to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # The volume named 'iso' is used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
-              interfaces:
-                - label: enp9s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
-                - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
-              network_config:
-                enp9s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp9s0f0
-                   - enp9s0f1
-
-          - name: {{ HOSTNAME_CMP002 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP002  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URLs, this is a URL to the image that is used
-                  # to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # The volume named 'iso' is used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
-              interfaces:
-                - label: enp9s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
-                - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
-              network_config:
-                enp9s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp9s0f0
-                   - enp9s0f1
-
-          - name: {{ HOSTNAME_GTW01 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_GTW01  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URLs, this is a URL to the image that is used
-                  # to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # The volume named 'iso' is used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
-              interfaces:
-                - label: enp2s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_GTW01
-                - label: enp2s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_GTW01
-
-              network_config:
-                enp2s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp2s0f1
-
-          - name: {{ HOSTNAME_GTW02 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_GTW02  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # As with the agent URLs, this is a URL to the image that is used
-                  # to deploy the node. It must also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # The volume named 'iso' is used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
-              interfaces:
-                - label: enp2s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_GTW02
-                - label: enp2s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_GTW02
-
-              network_config:
-                enp2s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp2s0f1
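Note: nearly every knob in this underlay resolves through '!os_env NAME, default' or the Jinja os_env() helper, so a run is parameterized entirely from the caller's shell. A sketch of the minimum environment such a run would need (variable names come from the template above; all values are placeholders):

    export IRONIC_URL=http://ironic.example:6385/            # fuel-devops -> Ironic API
    export IRONIC_AGENT_KERNEL_URL=http://images.example/agent.kernel
    export IRONIC_AGENT_RAMDISK_URL=http://images.example/agent.ramdisk
    export IRONIC_SOURCE_IMAGE_URL=http://images.example/xenial.qcow2
    export IRONIC_SOURCE_IMAGE_CHECKSUM=d41d8cd98f00b204e9800998ecf8427e
    export IPMI_USER=admin IPMI_PASSWORD=secret              # BMC access
    export IPMI_HOST_KVM01=10.0.0.11                         # ... one per node
    export IRONIC_LAB_PXE_IFACE_0=enp1s0 CONTROL_IFACE=enp2s0  # host NICs to bridge
    export ETH0_MAC_ADDRESS_KVM01=00:11:22:33:44:55          # ties ifaces to L2 nets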
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/core.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/core.yaml
deleted file mode 100644
index 80073cf..0000000
--- a/tcp_tests/templates/cookied-bm-oc40-queens/core.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
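Note: these macros from shared-core.yaml install the HA core (keepalived VIP, GlusterFS, RabbitMQ, Galera, haproxy, nginx, memcached). A manual spot-check after they complete might look like this sketch (the VIP address is taken from the cluster context later in this change; the exact pillar layout varies by model):

    # The VIP should be held by exactly one keepalived cluster member
    salt -C 'I@keepalived:cluster' cmd.run 'ip -4 addr show | grep 10.167.8.240'
    # Galera should report the full cluster size
    salt -C 'I@galera:master' mysql.status | grep wsrep_cluster_size
    # All RabbitMQ nodes should appear in the cluster
    salt -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'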
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/lab04-physical-inventory.yaml
deleted file mode 100644
index 5aa9ebe..0000000
--- a/tcp_tests/templates/cookied-bm-oc40-queens/lab04-physical-inventory.yaml
+++ /dev/null
@@ -1,94 +0,0 @@
-nodes:
-    cfg01.cookied-bm-oc40-queens.local:
-      reclass_storage_name: infra_config_node01
-      roles:
-      - infra_config
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_static_ctl
-          single_address: 10.167.8.99
-
-    # Physical nodes
-    kvm01.cookied-bm-oc40-queens.local:
-      reclass_storage_name: infra_kvm_node01
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        enp9s0f0:
-          role: single_mgm
-        enp9s0f1:
-          role: bond0_ab_ovs_vlan_ctl
-
-    kvm02.cookied-bm-oc40-queens.local:
-      reclass_storage_name: infra_kvm_node02
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        enp9s0f0:
-          role: single_mgm
-        enp9s0f1:
-          role: bond0_ab_ovs_vlan_ctl
-
-    kvm03.cookied-bm-oc40-queens.local:
-      reclass_storage_name: infra_kvm_node03
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        enp9s0f0:
-          role: single_mgm
-        enp9s0f1:
-          role: bond0_ab_ovs_vlan_ctl
-
-    cmp<<count>>:
-      reclass_storage_name: openstack_compute_rack01
-      roles:
-      - openstack_compute
-      - features_lvm_backend_volume_sdb
-      - linux_system_codename_xenial
-      interfaces:
-        enp2s0f1:
-        enp2s0f1:
-          role: single_dhcp
-        enp5s0f0:
-          role: bond0_ab_contrail
-        enp5s0f1:
-          role: single_vlan_ctl
-
-#    cmp001.cookied-bm-oc40-queens.local:
-#      reclass_storage_name: openstack_compute_node01
-#      roles:
-#      - openstack_compute
-#      - features_lvm_backend_volume_sdb
-#      - linux_system_codename_xenial
-#      interfaces:
-#        enp2s0f1:
-#          role: single_mgm
-#          deploy_address: 172.16.49.73
-#        enp5s0f0:
-#          role: single_contrail_vlan_prv
-#          tenant_address: 192.168.0.101
-#        enp5s0f1:
-#          role: single_vlan_ctl
-#          single_address: 10.167.8.101
-#    cmp002.cookied-bm-oc40-queens.local:
-#      reclass_storage_name: openstack_compute_node02
-#      roles:
-#      - openstack_compute
-#      - features_lvm_backend_volume_sdb
-#      - linux_system_codename_xenial
-#      interfaces:
-#        enp2s0f1:
-#          role: single_mgm
-#          deploy_address: 172.16.49.74
-#        enp5s0f0:
-#          role: single_contrail_vlan_prv
-#          tenant_address: 192.168.0.102
-#        enp5s0f1:
-#          role: single_vlan_ctl
-#          single_address: 10.167.8.102
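Note: 'cmp<<count>>' is a dynamic-node pattern; the generator expands it into cmp001, cmp002, ... per the rack definition (zero-padded because compute_padding_with_zeros is set in the context below). Purely for illustration, the naming it produces:

    # Zero-padded compute hostnames for a two-node rack
    for n in 1 2; do
        printf 'cmp%03d.cookied-bm-oc40-queens.local\n' "$n"
    done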
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml
deleted file mode 100644
index 42d6de1..0000000
--- a/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml
+++ /dev/null
@@ -1,322 +0,0 @@
-{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-bm-oc40-queens/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-oc40-queens/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% set PATTERN = os_env('PATTERN', 'false') %}
-{% set RUN_TEMPEST = os_env('RUN_TEMPEST', 'false') %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-# Install OpenStack control services
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
-
-- description: Workaround - install cinder volume
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@cinder:volume' state.sls cinder
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
-
-# install contrail
-
-- description: Install Docker services
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host' match.pillar 'docker:host' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host' state.sls docker.host
-    fi; sleep 10;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 20}
-  skip_fail: false
-
-- description: Install opencontrail database services on first minion
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 20}
-  skip_fail: false
-
-- description: Install opencontrail database services
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:database' state.sls opencontrail.database
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 20}
-  skip_fail: false
-
-- description: Install Opencontrail control services on first minion
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 20}
-  skip_fail: false
-
-- description: Install Opencontrail control services
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 20}
-  skip_fail: false
-
-- description: Install Opencontrail collectors on first minion
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:collector and *01*' state.sls opencontrail exclude=opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 20}
-  skip_fail: false
-
-- description: Install Opencontrail collectors
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 20}
-  skip_fail: false
-
-- description: Spawn Opencontrail docker containers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:control or I@opencontrail:collector' state.sls docker.client && sleep 15;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-- description: Finalize opencontrail services on the first database node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 30}
-  skip_fail: false
-
-- description: Finalize opencontrail services on non-compute client nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:client and not I@opencontrail:compute' state.sls opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Finalize opencontrail services on compute nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 30}
-  skip_fail: true
-
-- description: Check contrail status
-  cmd: sleep 15; salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Reboot computes
-  cmd: |
-    salt "cmp*" system.reboot;
-    sleep 600;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: true
-
-- description: Remove crash files left in /var/crashes/ while vrouter was crashed
-  cmd: salt "cmp*" cmd.run "rm -rf /var/crashes/*"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: true
-
-- description: Apply Opencontrail client on compute nodes
-  cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 30}
-  skip_fail: false
-
-- description: Apply Opencontrail states on compute nodes
-  cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Check status for contrail services
-  cmd: |
-    sleep 15;
-    salt -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
-
-- description: Sync time on all nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
-    'service ntp stop; ntpd -gq;  service ntp start'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Hack resolv.conf on VCP nodes to allow access to internal services
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Create heat-net before creating the external net
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron net-create heat-net'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create public network for contrail
-  cmd: |
-    salt 'ntw01*' contrail.virtual_network_create public '{"external":true,"ip_prefix":"192.168.200.0","ip_prefix_len":24,"asn":64512,"target":10000}'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Neutron client step for contrail - create heat-subnet
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create heat-net 10.20.30.0/24 --allocation-pool start=10.20.30.10,end=10.20.30.254 --gateway 10.20.30.1 --name heat-subnet'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Neutron client step for contrail - create heat-router
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-create heat-router'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Neutron client step for contrail - set heat-router gateway to public
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-gateway-set heat-router public'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Neutron client step for contrail - add heat-subnet interface to heat-router
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-interface-add heat-router heat-subnet'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Fix default security group for access to external net from outside
-  cmd: |
-    salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule list --column ID -f value | xargs openstack security group rule delete';
-    salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol tcp';
-    salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol tcp';
-    salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol icmp';
-    salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol icmp';
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: true
-
-# Starting prepare runtest
-
-- description: Upload tempest template
-  upload:
-    local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
-    local_filename: runtest.yml
-    remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/
-  node_name: {{ HOSTNAME_CFG01 }}
-  skip_fail: False
-
-- description: Include class with tempest template into cfg node
-  cmd: |
-    sed -i 's/classes\:/classes\:\n- cluster.{{ LAB_CONFIG_NAME }}.infra.runtest/g' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml;
-    salt '*' saltutil.refresh_pillar;
-    salt '*' saltutil.sync_all;
-    salt 'ctl01*' pkg.install docker.io;
-    salt 'ctl01*' cmd.run 'iptables --policy FORWARD ACCEPT';
-    salt 'cfg01*' state.sls salt.minion && sleep 20;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Enforce keystone client
-  cmd: |
-    salt 'cfg01*' state.sls keystone.client;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Create flavors for tests
-  cmd: |
-    salt 'cfg01*' state.sls nova.client;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Upload cirros image
-  cmd: |
-    salt 'cfg01*' state.sls glance.client;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Generate tempest config
-  cmd: |
-    salt 'cfg01*' state.sls runtest;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Download cirros image for runtest
-  cmd: |
-    wget http://172.19.112.216:8099/cirros-0.3.5-x86_64-disk.img -O /tmp/TestCirros-0.3.5.img
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Tune tempest.conf for contrail-specific tests
-  cmd: |
-    apt install crudini jq -y;
-    crudini --set /tmp/test/tempest.conf auth tempest_roles admin;
-    crudini --set /tmp/test/tempest.conf patrole custom_policy_files /etc/opencontrail/policy.json;
-    crudini --set /tmp/test/tempest.conf sdn service_name opencontrail;
-    cat /tmp/test/tempest.conf;
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: true
-
-- description: Run tempest from new docker image
-  cmd: |
-    OPENSTACK_VERSION=`salt-call --out=newline_values_only pillar.get _param:openstack_version`;
-    docker run --name "run-tempest-yml" -d -e ARGS="-r test -w 2" -v /tmp/test/tempest.conf:/etc/tempest/tempest.conf -v /tmp/:/tmp/ -v /tmp/test:/root/tempest -v /etc/ssl/certs/:/etc/ssl/certs/ docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:$OPENSTACK_VERSION /bin/bash -c "run-tempest";
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Wait for the tempest container to finish
-  cmd: |
-    report_file=`find /tmp/test -maxdepth 1 -name 'report_*xml' -print -quit`;
-    if [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ -f "$report_file" ];
-    then echo "All done!"; docker logs run-tempest-yml;
-    elif [ `docker inspect run-tempest-yml | jq -M '.[]."State"."Status"' | tr -d '"'` == "exited" ] && [ ! -f "$report_file" ];
-    then echo "Exit without report!"; docker logs run-tempest-yml;
-    else echo "Tempest not finished... ";sleep 900; false;
-    fi
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 25, delay: 30}
-  skip_fail: false
-
-- description: Download xml results
-  download:
-    remote_path: /tmp/test/
-    remote_filename: "report_*.xml"
-    local_path: {{ os_env('PWD') }}
-  node_name: {{ HOSTNAME_CTL01 }}
-  skip_fail: true
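Note: the wait step above distinguishes three outcomes from two signals, the container state and the presence of a report file. As a standalone sketch of one iteration (same container name and paths as the step):

    status=$(docker inspect run-tempest-yml | jq -rM '.[].State.Status')
    report_file=$(find /tmp/test -maxdepth 1 -name 'report_*xml' -print -quit)
    if [ "$status" = "exited" ] && [ -n "$report_file" ]; then
        echo "All done!"; docker logs run-tempest-yml
    elif [ "$status" = "exited" ]; then
        echo "Exited without a report!"; docker logs run-tempest-yml
    else
        echo "Tempest not finished..."; sleep 900; false   # non-zero exit triggers a retry
    fi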
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml b/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml
deleted file mode 100644
index 85d4d67..0000000
--- a/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml
+++ /dev/null
@@ -1,47 +0,0 @@
-classes:
-- service.runtest.tempest
-- service.runtest.tempest.public_net
-- service.runtest.tempest.services.manila.glance
-parameters:
-  _param:
-    glance_image_cirros_location: http://172.19.112.216:8099/cirros-0.3.5-x86_64-disk.img
-    glance_image_fedora_location: http://172.19.112.216:8099/Fedora-Cloud-Base-27-1.6.x86_64.qcow2
-    glance_image_manila_location: http://172.19.112.216:8099/manila-service-image-master.qcow2
-    openstack_public_neutron_subnet_allocation_end: 192.168.200.220
-    openstack_public_neutron_subnet_allocation_start: 192.168.200.130
-    openstack_public_neutron_subnet_cidr: 192.168.200.0/24
-    openstack_public_neutron_subnet_gateway: 192.168.200.1
-    runtest_tempest_cfg_dir: /tmp/test
-    runtest_tempest_cfg_name: tempest.conf
-    runtest_tempest_public_net: public
-    tempest_test_target: ctl01*
-  neutron:
-    client:
-      enabled: true
-  runtest:
-    enabled: true
-    keystonerc_node: ctl01*
-    tempest:
-      DEFAULT:
-        log_file: tempest.log
-      cfg_dir: ${_param:runtest_tempest_cfg_dir}
-      cfg_name: ${_param:runtest_tempest_cfg_name}
-      compute:
-        min_compute_nodes: 2
-      convert_to_uuid:
-        network:
-          public_network_id: ${_param:runtest_tempest_public_net}
-      enabled: true
-      heat_plugin:
-        build_timeout: '600'
-      put_keystone_rc_enabled: false
-      put_local_image_file_enabled: false
-      share:
-        capability_snapshot_support: true
-        run_driver_assisted_migration_tests: false
-        run_manage_unmanage_snapshot_tests: false
-        run_manage_unmanage_tests: false
-        run_migration_with_preserve_snapshots_tests: false
-        run_quota_tests: true
-        run_replication_tests: false
-        run_snapshot_tests: true
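Note: the '${_param:...}' references are reclass interpolation: each value is defined once under _param and substituted wherever it is referenced. Once the model is compiled, the resolved values can be read back from pillar; a sketch, assuming the runtest class keys land in pillar unchanged:

    salt 'cfg01*' pillar.get runtest:tempest:cfg_dir   # expected: /tmp/test
    salt 'cfg01*' pillar.items runtest --out=yaml | head -n 40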
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-cookiecutter-contrail.yaml
deleted file mode 100644
index bfcd153..0000000
--- a/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-cookiecutter-contrail.yaml
+++ /dev/null
@@ -1,257 +0,0 @@
-default_context:
-  backup_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
-    +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
-    qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
-    m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
-    7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
-    2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
-    HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
-    AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
-    o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
-    5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
-    XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
-    AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
-    USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
-    uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
-    QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
-    98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
-    r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
-    qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
-    CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
-    p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
-    79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
-    NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
-    CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
-    XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
-    N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
-    -----END RSA PRIVATE KEY-----
-  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
-  bmk_enabled: 'False'
-  ceph_enabled: 'False'
-  cicd_control_node01_address: 10.167.8.91
-  cicd_control_node01_hostname: cid01
-  cicd_control_node02_address: 10.167.8.92
-  cicd_control_node02_hostname: cid02
-  cicd_control_node03_address: 10.167.8.93
-  cicd_control_node03_hostname: cid03
-  cicd_control_vip_address: 10.167.8.90
-  cicd_control_vip_hostname: cid
-  cicd_enabled: 'True'
-  cicd_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
-    oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
-    IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
-    kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
-    wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
-    27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
-    5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
-    lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
-    k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
-    3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
-    dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
-    0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
-    qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
-    BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
-    UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
-    VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
-    1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
-    nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
-    Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
-    FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
-    HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
-    Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
-    poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
-    17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
-    l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
-    -----END RSA PRIVATE KEY-----
-  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
-  cluster_domain: cookied-bm-oc40-queens.local
-  cluster_name: cookied-bm-oc40-queens
-  opencontrail_version: 4.0
-  linux_repo_contrail_component: oc40
-  compute_bond_mode: active-backup
-  compute_padding_with_zeros: 'True'
-  compute_primary_first_nic: eth1
-  compute_primary_second_nic: eth2
-  context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
-  control_network_netmask: 255.255.255.0
-  control_network_subnet: 10.167.8.0/24
-  control_vlan: '2422'
-  cookiecutter_template_branch: ''
-  cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
-  deploy_network_gateway: 172.16.49.65
-  deploy_network_netmask: 255.255.255.192
-  deploy_network_subnet: 172.16.49.64/26
-  deployment_type: physical
-  dns_server01: 172.18.176.6
-  dns_server02: 172.18.208.44
-  email_address: sgudz@mirantis.com
-  infra_bond_mode: active-backup
-  infra_deploy_nic: eth0
-  infra_kvm01_control_address: 10.167.8.241
-  infra_kvm01_deploy_address: 172.16.49.67
-  infra_kvm01_hostname: kvm01
-  infra_kvm02_control_address: 10.167.8.242
-  infra_kvm02_deploy_address: 172.16.49.68
-  infra_kvm02_hostname: kvm02
-  infra_kvm03_control_address: 10.167.8.243
-  infra_kvm03_deploy_address: 172.16.49.69
-  infra_kvm03_hostname: kvm03
-  infra_kvm_vip_address: 10.167.8.240
-  infra_primary_first_nic: eth1
-  infra_primary_second_nic: eth2
-  internal_proxy_enabled: 'False'
-  kqueen_custom_mail_enabled: 'False'
-  kqueen_enabled: 'False'
-  kubernetes_enabled: 'False'
-  local_repositories: 'False'
-  maas_deploy_address: 172.16.49.66
-  maas_deploy_cidr: 172.16.49.64/26
-  maas_deploy_gateway: 172.16.49.65
-  maas_deploy_range_end: 172.16.49.119
-  maas_deploy_range_start: 172.16.49.77
-  maas_deploy_vlan: '0'
-  maas_dhcp_enabled: 'True'
-  maas_fabric_name: fabric-51
-  maas_hostname: cfg01
-  maas_manage_deploy_network: 'True'
-  mcp_common_scripts_branch: ''
-  mcp_version: proposed
-  offline_deployment: 'False'
-  opencontrail_analytics_address: 10.167.8.30
-  opencontrail_analytics_hostname: nal
-  opencontrail_analytics_node01_address: 10.167.8.31
-  opencontrail_analytics_node01_hostname: nal01
-  opencontrail_analytics_node02_address: 10.167.8.32
-  opencontrail_analytics_node02_hostname: nal02
-  opencontrail_analytics_node03_address: 10.167.8.33
-  opencontrail_analytics_node03_hostname: nal03
-  opencontrail_compute_iface_mask: '24'
-  opencontrail_control_address: 10.167.8.20
-  opencontrail_control_hostname: ntw
-  opencontrail_control_node01_address: 10.167.8.21
-  opencontrail_control_node01_hostname: ntw01
-  opencontrail_control_node02_address: 10.167.8.22
-  opencontrail_control_node02_hostname: ntw02
-  opencontrail_control_node03_address: 10.167.8.23
-  opencontrail_control_node03_hostname: ntw03
-  opencontrail_enabled: 'True'
-  opencontrail_router01_address: 10.167.8.220
-  opencontrail_router01_hostname: rtr01
-  openldap_enabled: 'False'
-  openssh_groups: ''
-  openstack_benchmark_node01_address: 10.167.8.95
-  openstack_benchmark_node01_hostname: bmk01
-  openstack_cluster_size: compact
-  openstack_compute_count: '2'
-  openstack_compute_rack01_hostname: cmp
-  openstack_compute_rack01_single_subnet: 10.167.8
-  openstack_compute_rack01_tenant_subnet: 192.168.0
-  openstack_compute_single_address_ranges: 10.167.8.101-10.167.8.102
-  openstack_compute_deploy_address_ranges: 172.16.49.73-172.16.49.74
-  openstack_compute_tenant_address_ranges: 192.168.0.101-192.168.0.102
-  openstack_compute_backend_address_ranges: 192.168.0.101-192.168.0.102
-  openstack_compute_node01_hostname: cmp01
-  openstack_compute_node02_hostname: cmp02
-  openstack_compute_node01_address: 10.167.8.101
-  openstack_compute_node02_address: 10.167.8.102
-  openstack_compute_node01_single_address: 10.167.8.101
-  openstack_compute_node02_single_address: 10.167.8.102
-  openstack_compute_node01_deploy_address: 172.16.49.73
-  openstack_compute_node02_deploy_address: 172.16.49.74
-  openstack_control_address: 10.167.8.10
-  openstack_control_hostname: ctl
-  openstack_control_node01_address: 10.167.8.11
-  openstack_control_node01_hostname: ctl01
-  openstack_control_node02_address: 10.167.8.12
-  openstack_control_node02_hostname: ctl02
-  openstack_control_node03_address: 10.167.8.13
-  openstack_control_node03_hostname: ctl03
-  openstack_database_address: 10.167.8.50
-  openstack_database_hostname: dbs
-  openstack_database_node01_address: 10.167.8.51
-  openstack_database_node01_hostname: dbs01
-  openstack_database_node02_address: 10.167.8.52
-  openstack_database_node02_hostname: dbs02
-  openstack_database_node03_address: 10.167.8.53
-  openstack_database_node03_hostname: dbs03
-  openstack_enabled: 'True'
-  openstack_message_queue_address: 10.167.8.40
-  openstack_message_queue_hostname: msg
-  openstack_message_queue_node01_address: 10.167.8.41
-  openstack_message_queue_node01_hostname: msg01
-  openstack_message_queue_node02_address: 10.167.8.42
-  openstack_message_queue_node02_hostname: msg02
-  openstack_message_queue_node03_address: 10.167.8.43
-  openstack_message_queue_node03_hostname: msg03
-  openstack_network_engine: opencontrail
-  openstack_neutron_bgp_vpn: 'False'
-  openstack_neutron_bgp_vpn_driver: bagpipe
-  openstack_nfv_dpdk_enabled: 'False'
-  openstack_nfv_sriov_enabled: 'False'
-  openstack_nova_compute_nfv_req_enabled: 'False'
-  openstack_nova_compute_reserved_host_memory_mb: '900'
-  openstack_proxy_address: 10.167.8.80
-  openstack_proxy_hostname: prx
-  openstack_proxy_node01_address: 10.167.8.81
-  openstack_proxy_node01_hostname: prx01
-  openstack_proxy_node02_address: 10.167.8.82
-  openstack_proxy_node02_hostname: prx02
-  openstack_upgrade_node01_address: 10.167.8.19
-  openstack_version: queens
-  oss_enabled: 'False'
-  oss_node03_address: ${_param:stacklight_monitor_node03_address}
-  oss_webhook_app_id: '24'
-  oss_webhook_login_id: '13'
-  platform: openstack_enabled
-  public_host: ${_param:openstack_proxy_address}
-  publication_method: email
-  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
-  salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
-  salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
-  salt_master_address: 172.16.49.66
-  salt_master_hostname: cfg01
-  salt_master_management_address: 172.16.49.66
-  shared_reclass_branch: ''
-  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
-  stacklight_enabled: 'True'
-  stacklight_log_address: 10.167.8.60
-  stacklight_log_hostname: log
-  stacklight_log_node01_address: 10.167.8.61
-  stacklight_log_node01_hostname: log01
-  stacklight_log_node02_address: 10.167.8.62
-  stacklight_log_node02_hostname: log02
-  stacklight_log_node03_address: 10.167.8.63
-  stacklight_log_node03_hostname: log03
-  stacklight_long_term_storage_type: prometheus
-  stacklight_monitor_address: 10.167.8.70
-  stacklight_monitor_hostname: mon
-  stacklight_monitor_node01_address: 10.167.8.71
-  stacklight_monitor_node01_hostname: mon01
-  stacklight_monitor_node02_address: 10.167.8.72
-  stacklight_monitor_node02_hostname: mon02
-  stacklight_monitor_node03_address: 10.167.8.73
-  stacklight_monitor_node03_hostname: mon03
-  stacklight_telemetry_address: 10.167.8.85
-  stacklight_telemetry_hostname: mtr
-  stacklight_telemetry_node01_address: 10.167.8.86
-  stacklight_telemetry_node01_hostname: mtr01
-  stacklight_telemetry_node02_address: 10.167.8.87
-  stacklight_telemetry_node02_hostname: mtr02
-  stacklight_telemetry_node03_address: 10.167.8.88
-  stacklight_telemetry_node03_hostname: mtr03
-  stacklight_version: '2'
-  static_ips_on_deploy_network_enabled: 'False'
-  tenant_network_gateway: 192.168.0.220
-  tenant_network_netmask: 255.255.255.0
-  tenant_network_subnet: 192.168.0.0/24
-  tenant_vlan: '2423'
-  upstream_proxy_enabled: 'False'
-  use_default_network_scheme: 'True'
-  openldap_domain: cookied-bm-oc40-queens.local
-  openldap_enabled: 'True'
-  openldap_organisation: ${_param:cluster_name}
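
Before dropping a template directory it is worth confirming that nothing else
in the test suite still references it. A quick check along these lines
(illustrative; run from the repository root) should come back empty for each
removed lab name:

    git grep -l 'cookied-bm-oc40-queens' -- ':!tcp_tests/templates/cookied-bm-oc40-queens'
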
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-environment.yaml
deleted file mode 100644
index 8c2c642..0000000
--- a/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-environment.yaml
+++ /dev/null
@@ -1,273 +0,0 @@
-nodes:
-    # Virtual Control Plane nodes
-    cid01.cookied-bm-oc40-queens.local:
-      reclass_storage_name: cicd_control_node01
-      roles:
-      - cicd_control_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    cid02.cookied-bm-oc40-queens.local:
-      reclass_storage_name: cicd_control_node02
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    cid03.cookied-bm-oc40-queens.local:
-      reclass_storage_name: cicd_control_node03
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    ctl01.cookied-bm-oc40-queens.local:
-      reclass_storage_name: openstack_control_node01
-      roles:
-      - openstack_control_leader
-      - linux_system_codename_xenial
-      classes:
-      - system.linux.system.repo.mcp.apt_mirantis.docker
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    ctl02.cookied-bm-oc40-queens.local:
-      reclass_storage_name: openstack_control_node02
-      roles:
-      - openstack_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    ctl03.cookied-bm-oc40-queens.local:
-      reclass_storage_name: openstack_control_node03
-      roles:
-      - openstack_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    dbs01.cookied-bm-oc40-queens.local:
-      reclass_storage_name: openstack_database_node01
-      roles:
-      - openstack_database_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    dbs02.cookied-bm-oc40-queens.local:
-      reclass_storage_name: openstack_database_node02
-      roles:
-      - openstack_database
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    dbs03.cookied-bm-oc40-queens.local:
-      reclass_storage_name: openstack_database_node03
-      roles:
-      - openstack_database
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    msg01.cookied-bm-oc40-queens.local:
-      reclass_storage_name: openstack_message_queue_node01
-      roles:
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    msg02.cookied-bm-oc40-queens.local:
-      reclass_storage_name: openstack_message_queue_node02
-      roles:
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    msg03.cookied-bm-oc40-queens.local:
-      reclass_storage_name: openstack_message_queue_node03
-      roles:
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    prx01.cookied-bm-oc40-queens.local:
-      reclass_storage_name: openstack_proxy_node01
-      roles:
-      - openstack_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    prx02.cookied-bm-oc40-queens.local:
-      reclass_storage_name: openstack_proxy_node02
-      roles:
-      - openstack_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    mon01.cookied-bm-oc40-queens.local:
-      reclass_storage_name: stacklight_server_node01
-      roles:
-      - stacklightv2_server_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    mon02.cookied-bm-oc40-queens.local:
-      reclass_storage_name: stacklight_server_node02
-      roles:
-      - stacklightv2_server
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    mon03.cookied-bm-oc40-queens.local:
-      reclass_storage_name: stacklight_server_node03
-      roles:
-      - stacklightv2_server
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    nal01.cookied-bm-oc40-queens.local:
-      reclass_storage_name: opencontrail_analytics_node01
-      roles:
-      - opencontrail_analytics
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    nal02.cookied-bm-oc40-queens.local:
-      reclass_storage_name: opencontrail_analytics_node02
-      roles:
-      - opencontrail_analytics
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    nal03.cookied-bm-oc40-queens.local:
-      reclass_storage_name: opencontrail_analytics_node03
-      roles:
-      - opencontrail_analytics
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    ntw01.cookied-bm-oc40-queens.local:
-      reclass_storage_name: opencontrail_control_node01
-      roles:
-      - opencontrail_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    ntw02.cookied-bm-oc40-queens.local:
-      reclass_storage_name: opencontrail_control_node02
-      roles:
-      - opencontrail_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    ntw03.cookied-bm-oc40-queens.local:
-      reclass_storage_name: opencontrail_control_node03
-      roles:
-      - opencontrail_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    mtr01.cookied-bm-oc40-queens.local:
-      reclass_storage_name: stacklight_telemetry_node01
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    mtr02.cookied-bm-oc40-queens.local:
-      reclass_storage_name: stacklight_telemetry_node02
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    mtr03.cookied-bm-oc40-queens.local:
-      reclass_storage_name: stacklight_telemetry_node03
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    log01.cookied-bm-oc40-queens.local:
-      reclass_storage_name: stacklight_log_node01
-      roles:
-      - stacklight_log_leader_v2
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    log02.cookied-bm-oc40-queens.local:
-      reclass_storage_name: stacklight_log_node02
-      roles:
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    log03.cookied-bm-oc40-queens.local:
-      reclass_storage_name: stacklight_log_node03
-      roles:
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-#    bmk01.cookied-bm-oc40-queens.local:
-#      reclass_storage_name: openstack_benchmark_node01
-#      roles:
-#      - openstack_benchmark
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
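
Every node entry in the removed environment model above follows the same
shape: the node FQDN keys a mapping that names the generated Reclass storage
node, lists the node's roles, and assigns a role per interface. Reduced to a
minimal sketch (angle-bracket placeholders are illustrative):

    <fqdn>:
      reclass_storage_name: <generated storage node name>
      roles:
      - <service role, e.g. openstack_control_leader>
      - linux_system_codename_xenial
      interfaces:
        ens3:
          role: single_ctl
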
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/salt.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/salt.yaml
deleted file mode 100644
index 3853acd..0000000
--- a/tcp_tests/templates/cookied-bm-oc40-queens/salt.yaml
+++ /dev/null
@@ -1,152 +0,0 @@
-{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-oc40-queens/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-bm-oc40-queens/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for the other salt model repository parameters
-
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-oc40-queens') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = os_env('CLUSTER_CONTEXT_NAME', 'salt-context-cookiecutter-contrail.yaml') %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-- description: "Change path to internal storage for salt.control images"
-  cmd: |
-    set -e;
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
-    reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
-  cmd: |
-    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
-    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Temporary workaround for correct bridge names according to environment templates
-  cmd: |
-    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-- description: Update minion information
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
-    salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
-    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Rerun openssh after env model is generated
-  cmd: |
-    salt-call state.sls openssh
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.network.host
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-########################################
-# Spin up Control Plane VMs on KVM nodes
-########################################
-
-- description: Execute 'libvirt' states to create necessary libvirt networks
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Create VMs for control plane
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
-
-- description: '*Workaround* to wait until the control-plane VMs appear in salt-key (instead of a fixed sleep)'
-  cmd: |
-    salt-key -l acc| sort > /tmp/current_keys.txt &&
-    salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 20, delay: 30}
-  skip_fail: false
-
-#########################################
-# Configure all running salt minion nodes
-#########################################
-
-- description: Hack resolv.conf on VCP nodes for internal services access
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Refresh pillars on all minions
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Sync all salt resources
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Show reclass-salt --top for generated nodes
-  cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: "Lab04 workaround: Give each node root acces with key from cfg01"
-  cmd: |
-    set -e;
-    set -x;
-    key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
-    salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
-    salt '*' cmd.run "service sshd restart"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
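
All steps in the removed deploy templates share one schema consumed by the
test runner: a description, a shell cmd, the node to run it on, a retry
policy, and a skip_fail flag. A minimal sketch of that format (the test.ping
step is illustrative only):

    - description: Example step
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' test.ping
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 3, delay: 10}  # re-run up to 3 times, 10 seconds apart
      skip_fail: false              # a persistent failure aborts the whole run

Steps with skip_fail: true, like the workarounds above, are best-effort:
their failure is logged but does not stop the deployment.
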
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/sl.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/sl.yaml
deleted file mode 100644
index 2ff8f3d..0000000
--- a/tcp_tests/templates/cookied-bm-oc40-queens/sl.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-sl.yaml' as SHARED_SL with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-{{  SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
-{{  SHARED_SL.MACRO_INSTALL_MONGODB() }}
-{{  SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
-{{  SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
-{{  SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
-{{  SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
-{{  SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
-{{  SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
-{{  SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{  SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
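
sl.yaml is pure composition: it imports the shared macro libraries and
expands their macros in order, so the per-lab template stays thin while the
actual StackLight install logic lives in shared-sl.yaml. The pattern, reduced
to its smallest form (illustrative):

    {% import 'shared-sl.yaml' as SHARED_SL with context %}
    {{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
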
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--meta-data.yaml
deleted file mode 100644
index a594a53..0000000
--- a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
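
underlay--meta-data.yaml is a cloud-init meta-data stub; the {hostname}
placeholder is substituted per node before the config-drive image is built,
so for the Salt master it would render roughly as follows (assuming the
default hostname defined in underlay.yaml):

    instance-id: iid-local1
    hostname: cfg01.cookied-bm-oc40-queens.local
    local-hostname: cfg01.cookied-bm-oc40-queens.local
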
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 6c9e48f..0000000
--- a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,102 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   #   # Block access to SSH while node is preparing
-   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifdown ens3
-   - sudo ip r d default || true  # remove existing default route to get it from dhcp
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
-
-   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - apt-get update
-
-   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #   - echo "Allow SSH access ..."
-   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
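
The user-data files above are ordinary #cloud-config documents once the
{placeholders} are substituted, so they can be sanity-checked without booting
a node. One way (assuming a recent cloud-init is installed; older releases
expose the same check as 'cloud-init devel schema'):

    cloud-init schema --config-file rendered-user-data.yaml
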
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml
deleted file mode 100644
index ba69177..0000000
--- a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml
+++ /dev/null
@@ -1,125 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   #   # Block access to SSH while node is preparing
-   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - if lvs vg0; then pvresize /dev/vda3; fi
-   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifup {interface_name}
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
-   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   # Install latest kernel
-   #   - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #- echo "Allow SSH access ..."
-   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   #   - reboot
-   ########################################################
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          # The loopback network interface
-          auto lo
-          iface lo inet loopback
-          auto {interface_name}
-          iface {interface_name} inet dhcp
-
-   - path: /usr/share/growlvm/image-layout.yml
-     content: |
-       root:
-         size: '30%VG'
-       home:
-         size: '1G'
-       var_log:
-         size: '11%VG'
-       var_log_audit:
-         size: '5G'
-       var_tmp:
-         size: '11%VG'
-       tmp:
-         size: '5G'
-     owner: root:root
-
-  growpart:
-    mode: auto
-    devices:
-      - '/'
-      - '/dev/vda3'
-    ignore_growroot_disabled: false
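
The HWE variant carries the disk-growth sequence: cloud-init's growpart
module first extends /dev/vda3, then the guarded runcmd entries grow the LVM
stack to match image-layout.yml. Spelled out as plain shell (same commands as
above; the vg0 check makes the whole block a no-op on non-LVM images):

    if lvs vg0; then
        # Grow the physical volume into the enlarged /dev/vda3 partition,
        pvresize /dev/vda3
        # then redistribute the volume group according to the layout file.
        /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml
    fi
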
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml
deleted file mode 100644
index bdcd21d..0000000
--- a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,121 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Block access to SSH while node is preparing
-   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - if lvs vg0; then pvresize /dev/vda3; fi
-   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifup {interface_name}
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
-   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #   - echo "Allow SSH access ..."
-   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          # The loopback network interface
-          auto lo
-          iface lo inet loopback
-          auto {interface_name}
-          iface {interface_name} inet dhcp
-
-   - path: /usr/share/growlvm/image-layout.yml
-     content: |
-       root:
-         size: '30%VG'
-       home:
-         size: '1G'
-       var_log:
-         size: '11%VG'
-       var_log_audit:
-         size: '5G'
-       var_tmp:
-         size: '11%VG'
-       tmp:
-         size: '5G'
-     owner: root:root
-
-  growpart:
-    mode: auto
-    devices:
-      - '/'
-      - '/dev/vda3'
-    ignore_growroot_disabled: false
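
All three user-data variants create swap the same way; as a standalone
sequence, the runcmd entries above are equivalent to:

    fallocate -l 4G /swapfile    # preallocate a 4 GiB swap file
    chmod 600 /swapfile          # swap files must not be readable by others
    mkswap /swapfile             # write the swap signature
    swapon /swapfile             # enable it immediately
    echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab  # persist across reboots
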
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay.yaml
deleted file mode 100644
index e84e22d..0000000
--- a/tcp_tests/templates/cookied-bm-oc40-queens/underlay.yaml
+++ /dev/null
@@ -1,574 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-
-#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-oc40-queens') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
-#{% set HOSTNAME_CMP003 = os_env('HOSTNAME_CMP003', 'cmp003.' + DOMAIN_NAME) %}
-#{% set HOSTNAME_KVM04 = os_env('HOSTNAME_KVM04', 'kvm04.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
-{% set ETH2_IP_ADDRESS_CFG01 = os_env('ETH2_IP_ADDRESS_CFG01', '10.167.8.99') %}
-{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
-{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.68') %}
-{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.69') %}
-{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.73') %}
-{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
-#{% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.49.121') %}
-#{% set ETH0_IP_ADDRESS_KVM04 = os_env('ETH0_IP_ADDRESS_KVM04', '172.16.49.122') %}
-# {% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
-
-{% import 'cookied-bm-oc40-queens/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-bm-oc40-queens/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
-{% import 'cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_HWE with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
- - &cloudinit_user_data_hwe {{ CLOUDINIT_USER_DATA_HWE }}
-
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'cookied-bm-oc4_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
-        params:
-          ip_reserved:
-            gateway: +62
-            l2_network_device: +61
-            default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
-            default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
-            default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
-            default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
-            default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
-            default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
-            #default_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
-            #default_{{ HOSTNAME_KVM04 }}: {{ ETH0_IP_ADDRESS_KVM04 }}
-            #default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
-            virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
-            virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
-            virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
-            virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
-            virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
-            #virtual_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
-            #virtual_{{ HOSTNAME_KVM04 }}: {{ ETH0_IP_ADDRESS_KVM04 }}
-            # virtual_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
-          #ip_ranges:
-          #    dhcp: [+2, -4]
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
-        params:
-          ip_reserved:
-            virtual_{{ HOSTNAME_CFG01 }}: {{ ETH2_IP_ADDRESS_CFG01 }}
-            gateway: +1
-            l2_network_device: +1
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '192.168.5.0/24:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '192.168.200.0/24:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: -2
-
-    groups:
-
-      - name: virtual
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-
-        network_pools:
-          admin: admin-pool01
-
-        l2_network_devices:
-          # Ironic management interface
-          admin:
-            address_pool: admin-pool01
-            dhcp: false
-            parent_iface:
-              phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
-          private:
-            parent_iface:
-              phys_dev: !os_env CONTROL_IFACE
-
-        group_volumes:
-         - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
-           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           format: qcow2
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_cfg01
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-      - name: default
-        driver:
-          name: devops_driver_ironic
-          params:
-            os_auth_token: fake-token
-            ironic_url: !os_env IRONIC_URL  # URL that will be used by fuel-devops
-                                            # to access Ironic API
-            # Agent URL that is accessible from the deploying node when nodes
-            # are bootstrapped with PXE. Usually the PXE/provision network address is used.
-            agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
-            agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
-
-        network_pools:
-          admin: admin-pool01
-
-        nodes:
-
-        #  - name: {{ HOSTNAME_CFG01 }}
-        #    role: salt_master
-        #    params:
-        #      ipmi_user: !os_env IPMI_USER
-        #      ipmi_password: !os_env IPMI_PASSWORD
-        #      ipmi_previlegies: OPERATOR
-        #      ipmi_host: !os_env IPMI_HOST_CFG01  # hostname or IP address
-        #      ipmi_lan_interface: lanplus
-        #      ipmi_port: 623
-
-        #      root_volume_name: system     # see 'volumes' below
-        #      cloud_init_volume_name: iso  # see 'volumes' below
-        #      cloud_init_iface_up: enp3s0f1  # see 'interfaces' below.
-        #      volumes:
-        #        - name: system
-        #          capacity: !os_env NODE_VOLUME_SIZE, 200
-
-        #          # The same as for agent URL, here is an URL to the image that should be
-        #          # used for deploy the node. It should also be accessible from deploying
-        #          # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
-        #          source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-        #          source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-        #        - name: iso  # Volume with name 'iso' will be used
-        #                     # for store image with cloud-init metadata.
-
-        #          cloudinit_meta_data: *cloudinit_meta_data
-        #          cloudinit_user_data: *cloudinit_user_data_cfg01
-
-        #      interfaces:
-        #        - label: enp3s0f0  # Infra interface
-        #          mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
-        #        - label: enp3s0f1
-        #          l2_network_device: admin
-        #          mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
-
-        #      network_config:
-        #        enp3s0f0:
-        #          networks:
-        #           - infra
-        #        enp3s0f1:
-        #          networks:
-        #           - admin
-
-          - name: {{ HOSTNAME_KVM01 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_KVM01  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # The same as for the agent URL, this is a URL to the image that should
-                  # be used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: enp9s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
-                - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
-
-              network_config:
-                enp9s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp9s0f1
-
-          - name: {{ HOSTNAME_KVM02 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_KVM02  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # The same as for the agent URL, this is a URL to the image that should
-                  # be used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                - label: enp9s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
-                - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
-
-              network_config:
-                enp9s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp9s0f1
-
-          - name: {{ HOSTNAME_KVM03 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_KVM03  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              # cloud_init_iface_up: eno1  # see 'interfaces' below.
-              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # The same as for the agent URL, this is a URL to the image that should
-                  # be used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data
-
-              interfaces:
-                # - label: eno1
-                - label: enp9s0f0
-                  l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
-                # - label: eno2
-                - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
-
-              network_config:
-                # eno1:
-                enp9s0f0:
-                  networks:
-                   - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp9s0f1
-
-                     #          - name: {{ HOSTNAME_KVM04 }}
-                     #            role: salt_minion
-                     #            params:
-                     #              ipmi_user: !os_env IPMI_USER
-                     #              ipmi_password: !os_env IPMI_PASSWORD
-                     #              ipmi_previlegies: OPERATOR
-                     #              ipmi_host: !os_env IPMI_HOST_KVM04  # hostname or IP address
-                     #              ipmi_lan_interface: lanplus
-                     #              ipmi_port: 623
-                     #
-                     #              root_volume_name: system     # see 'volumes' below
-                     #              cloud_init_volume_name: iso  # see 'volumes' below
-                     #              # cloud_init_iface_up: eno1  # see 'interfaces' below.
-                     #              cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
-                     #              volumes:
-                     #                - name: system
-                     #                  capacity: !os_env NODE_VOLUME_SIZE, 200
-                     #
-                     #                  # The same as for agent URL, here is an URL to the image that should be
-                     #                  # used for deploy the node. It should also be accessible from deploying
-                     #                  # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
-                     #                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                     #                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-                     #
-                     #                - name: iso  # Volume with name 'iso' will be used
-                     #                             # for store image with cloud-init metadata.
-                     #
-                     #                  cloudinit_meta_data: *cloudinit_meta_data
-                     #                  cloudinit_user_data: *cloudinit_user_data
-                     #
-                     #              interfaces:
-                     #                # - label: eno1
-                     #                - label: enp2s0f0
-                     #                  l2_network_device: admin
-                     #                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM04
-                     #                # - label: eno2
-                     #                - label: enp2s0f1
-                     #                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM04
-                     #
-                     #              network_config:
-                     #                # eno1:
-                     #                enp2s0f0:
-                     #                  networks:
-                     #                   - admin
-                     #                bond0:
-                     #                  networks:
-                     #                   - control
-                     #                  aggregation: active-backup
-                     #                  parents:
-                     #                   - enp2s0f1
-                     #
-          - name: {{ HOSTNAME_CMP001 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP001  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              # cloud_init_iface_up: enp3s0f0  # see 'interfaces' below.
-              cloud_init_iface_up: enp2s0f1  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # The same as for the agent URL, here is a URL to the image that should be
-                  # used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # The volume named 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_hwe
-
-              interfaces:
-                - label: enp2s0f0
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
-                - label: enp2s0f1
-                  l2_network_device: admin
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
-                - label: enp5s0f0
-                  mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
-                  features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
-                - label: enp5s0f1
-                  mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
-                  features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
-                # - label: enp5s0f2
-                #   mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
-                #   features: ['dpdk', 'dpdk_pci: 0000:05:00.2']
-
-              network_config:
-                enp2s0f1:
-                  networks:
-                   - admin
-
-          - name: {{ HOSTNAME_CMP002 }}
-            role: salt_minion
-            params:
-              ipmi_user: !os_env IPMI_USER
-              ipmi_password: !os_env IPMI_PASSWORD
-              ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP002  # hostname or IP address
-              ipmi_lan_interface: lanplus
-              ipmi_port: 623
-
-              root_volume_name: system     # see 'volumes' below
-              cloud_init_volume_name: iso  # see 'volumes' below
-              # cloud_init_iface_up: eno1  # see 'interfaces' below.
-              cloud_init_iface_up: enp2s0f1  # see 'interfaces' below.
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # The same as for the agent URL, here is a URL to the image that should be
-                  # used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # The volume named 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_hwe
-
-              interfaces:
-                # - label: eno1
-                - label: enp2s0f0
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
-                # - label: eth0
-                - label: enp2s0f1
-                  l2_network_device: admin
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
-                # - label: eth3
-                - label: enp5s0f0
-                  mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
-                  features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
-                # - label: eth2
-                - label: enp5s0f1
-                  mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
-                  features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
-                # - label: eth4
-                #   mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
-                #   features: ['dpdk', 'dpdk_pci: 0000:0b:00.0']
-
-              network_config:
-                enp2s0f1:
-                  networks:
-                   - admin
-
-                     #          - name: {{ HOSTNAME_CMP003 }}
-                     #            role: salt_minion
-                     #            params:
-                     #              ipmi_user: !os_env IPMI_USER
-                     #              ipmi_password: !os_env IPMI_PASSWORD
-                     #              ipmi_previlegies: OPERATOR
-                     #              ipmi_host: !os_env IPMI_HOST_CMP003  # hostname or IP address
-                     #              ipmi_lan_interface: lanplus
-                     #              ipmi_port: 623
-                     #
-                     #              root_volume_name: system     # see 'volumes' below
-                     #              cloud_init_volume_name: iso  # see 'volumes' below
-                     #              # cloud_init_iface_up: eno1  # see 'interfaces' below.
-                     #              cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
-                     #              volumes:
-                     #                - name: system
-                     #                  capacity: !os_env NODE_VOLUME_SIZE, 200
-                     #
-                     #                  # The same as for agent URL, here is an URL to the image that should be
-                     #                  # used for deploy the node. It should also be accessible from deploying
-                     #                  # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
-                     #                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                     #                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-                     #
-                     #                - name: iso  # Volume with name 'iso' will be used
-                     #                             # for store image with cloud-init metadata.
-                     #
-                     #                  cloudinit_meta_data: *cloudinit_meta_data
-                     #                  cloudinit_user_data: *cloudinit_user_data_hwe
-                     #
-                     #              interfaces:
-                     #                # - label: eno1
-                     #                - label: enp2s0f1
-                     #                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP003
-                     #                # - label: eth0
-                     #                - label: enp2s0f0
-                     #                  l2_network_device: admin
-                     #                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP003
-                     #
-                     #              network_config:
-                     #                enp2s0f0:
-                     #                  networks:
-                     #                   - admin
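For reference, the IPMI parameters above map directly onto an ipmitool invocation; a minimal Python sketch, assuming ipmitool is installed and the same environment variables the template reads via !os_env are exported:

import os
import subprocess

# Query chassis power with the parameters from the underlay template:
# lanplus interface, port 623, credentials and host from the environment.
cmd = [
    "ipmitool",
    "-I", "lanplus",                      # ipmi_lan_interface: lanplus
    "-H", os.environ["IPMI_HOST_KVM03"],  # ipmi_host: !os_env IPMI_HOST_KVM03
    "-p", "623",                          # ipmi_port: 623
    "-U", os.environ["IPMI_USER"],        # ipmi_user: !os_env IPMI_USER
    "-P", os.environ["IPMI_PASSWORD"],    # ipmi_password: !os_env IPMI_PASSWORD
    "power", "status",
]
print(subprocess.run(cmd, capture_output=True, text=True).stdout)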
diff --git a/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/lab04-physical-inventory.yaml
deleted file mode 100644
index e92ddd6..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/lab04-physical-inventory.yaml
+++ /dev/null
@@ -1,92 +0,0 @@
-nodes:
-    cfg01.cookied-cicd-bm-k8s-contrail40-maas.local:
-      reclass_storage_name: infra_config_node01
-      roles:
-      - infra_config
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_static_mgm
-        ens4:
-          role: single_static_ctl
-    # Physical nodes
-
-    kvm01.cookied-cicd-bm-k8s-contrail40-maas.local:
-      reclass_storage_name: infra_kvm_node01
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        enp9s0f0:
-          role: single_mgm_dhcp
-        enp9s0f1:
-          role: single_vlan_ctl
-
-    kvm02.cookied-cicd-bm-k8s-contrail40-maas.local:
-      reclass_storage_name: infra_kvm_node02
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        enp9s0f0:
-          role: single_mgm_dhcp
-        enp9s0f1:
-          role: single_vlan_ctl
-
-    kvm03.cookied-cicd-bm-k8s-contrail40-maas.local:
-      reclass_storage_name: infra_kvm_node03
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        enp9s0f0:
-          role: single_mgm_dhcp
-        enp9s0f1:
-          role: single_vlan_ctl
-
-    ctl01.cookied-cicd-bm-k8s-contrail40-maas.local:
-      reclass_storage_name: kubernetes_control_node01
-      roles:
-      - kubernetes_control_contrail
-      - linux_system_codename_xenial
-      interfaces:
-        enp2s0f0:
-          role: single_dhcp
-        enp2s0f1:
-          role: single_k8s_contrail_ctl
-
-    ctl02.cookied-cicd-bm-k8s-contrail40-maas.local:
-      reclass_storage_name: kubernetes_control_node02
-      roles:
-      - kubernetes_control_contrail
-      - linux_system_codename_xenial
-      interfaces:
-        enp2s0f0:
-          role: single_dhcp
-        enp2s0f1:
-          role: single_k8s_contrail_ctl
-
-    ctl03.cookied-cicd-bm-k8s-contrail40-maas.local:
-      reclass_storage_name: kubernetes_control_node03
-      roles:
-      - kubernetes_control_contrail
-      - linux_system_codename_xenial
-      interfaces:
-        enp2s0f0:
-          role: single_dhcp
-        enp2s0f1:
-          role: single_k8s_contrail_ctl
-
-    cmp<<count>>:
-      reclass_storage_name: kubernetes_compute_rack01
-      roles:
-      - kubernetes_compute_contrail
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        enp9s0f0:
-          role: single_dhcp
-        enp9s0f1:
-          role: single_vlan_ctl
-        ens11f1:
-          role: bond0_ab_contrail
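The cmp<<count>> entry above is a rack template rather than a literal hostname: the generator expands it into numbered nodes, zero-padded when compute_padding_with_zeros is 'True' (see the cookiecutter context below). A minimal sketch of that expansion; the expand_rack helper is hypothetical:

# Hypothetical helper illustrating zero-padded rack expansion.
def expand_rack(prefix: str, count: int, pad_with_zeros: bool = True) -> list[str]:
    width = 3 if pad_with_zeros else 0
    return [f"{prefix}{i:0{width}d}" for i in range(1, count + 1)]

# With kubernetes_compute_count: 2 -> ['cmp001', 'cmp002']
print(expand_rack("cmp", 2))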
diff --git a/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/salt-context-cookiecutter-k8s-contrail.yaml b/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/salt-context-cookiecutter-k8s-contrail.yaml
deleted file mode 100644
index ac12945..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/salt-context-cookiecutter-k8s-contrail.yaml
+++ /dev/null
@@ -1,370 +0,0 @@
-#https://docs.mirantis.com/mcp/master/mcp-ref-arch/opencontrail-plan/contrail-overview/contrail-4.html#
-default_context:
-  backup_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEowIBAAKCAQEAskZyhTySYWvGrp+dlv+q2La8oiM8Sv1JfQog8+UW28hGUbCq
-    PnWa7bzVqENEY+g+qbQYh2Pvb2xURyY9+02TWLmCYzq7+orO1eG2SDt384YzDc6B
-    nQohUbGwaSH2YsT/QA8KV1fvUW9P0gVEHmJWR8Jo3vdRyw+tEUszYkiTYkcVc9zt
-    O5eYT9a6gSjLhl+tmJXu38jdWTcE8zJP+OGpJRDJpV2EAYwv+LXde9REv4KCGMqN
-    lvli9IA+zarfMsAzSTPpL5ooslB20akKM1h5li3LG47OpKMG9sMVFGEEo7+lqTGa
-    zUJEUPbJU95PqLfxoIOjYF/MhWKU5VALLraVwwIDAQABAoIBAHUAj0CqlFej6G3L
-    DB6CBZrds8el4HhG6+hIsX/gprPwKVaL3+/GN7w35vHb1BLN5fJy5HZXPFmge1+G
-    1b8NFttwRQbjEWRJnZ352Sxm+z60oOU61w4+C8gWGnWleJMyP2PHez3/1G1Z5MUt
-    95sJZx8JlNJg9ymSTD/BXyPuBezFKf8jUSgPbhBv8B2yy82YGzqc9u7sK6BN90P1
-    3ZcynQ4cfieZLoim56dF9YEixr8plGmGpOspPZFlVCGIc1y2BC4ZUyDatcCa7/gQ
-    3aDdt9lkEfoCHezAFOmaZDCOZ70spkwCqXYk42BXpDjKF6djCXyx3WKVF+IhEOYT
-    /S1I8KECgYEA1tzUZgW43/Z7Sm+QnoK3R9hG2oZZOoiTDdHyC+f5dk6maNbJbivM
-    FAPIpzHtCyvBEiSgGmlqpUrwR2NbYnOHjOX72Yq7/e0Vl1XWmplKCsTDNFGXx5Fs
-    9AQbWjOF+drgfZ5p3dNyE9689nJZg5EhTxL8dfwnZat/l+/OKFO2tM0CgYEA1GhW
-    4FMsXe3/g7M0dj5FnbS8xjz93pei5YZanID9mY/RUcKbegdtfvtwsfkZe5trbI39
-    jv96QyJeAnf48UDFwCV6SSZF/8Ez0pYHZanwkwKsli5uZdvSz7rUyVwE6tyO24WA
-    Trgpmbb8uxhJHBNuD+bC/iGd1H0IUuJ65ChD9M8CgYEAxfp2z4boQZ2ZHw5LoHLr
-    tIyJROPUJgYgEfToZaYbC7MOzL1Un2pFwg92fPCY7dkkLraGu690r9esLOtVEhNH
-    zEFB3cJi1Gf3pBlZA9zJB8Ej6Pphs2bBkNqT5XpiMcZHYhhsjhQ+Iibz0NWuu3cn
-    zPe+nmx4VMtAZ1x0hl4UlOUCgYBh8NaWS2O60AIwrRI0r5QUGwGsevUojU0Mfr4L
-    SiMlir4e8BgW1ZP0qGYXel/4sfp/rJ1NMZojmJY2lzFPtOk6J03SqvY97s1BffJd
-    O1X1w5bnDPPUvd7f8CsryeVuklLBADbglWSBP3IbpyAW9RKb/HDPE5seHqUW6t11
-    lOd42wKBgBW0tTV6+aqao6o4ZBU0SVoNg9/PwgOubAzeSDW2+ArXn1sMmroSfXVw
-    fbUTJI5TF/1pd7A5AKoR1FfTqlssoYlMGEPI6HJ4n9/4SqLpwd99HFW0ISt+EUMh
-    Tqt9mDfKzwHxG2QTuOwyrslO8iTwRoN5OYgm4dsL471Obr4DILTz
-    -----END RSA PRIVATE KEY-----
-  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyRnKFPJJha8aun52W/6rYtryiIzxK/Ul9CiDz5RbbyEZRsKo+dZrtvNWoQ0Rj6D6ptBiHY+9vbFRHJj37TZNYuYJjOrv6is7V4bZIO3fzhjMNzoGdCiFRsbBpIfZixP9ADwpXV+9Rb0/SBUQeYlZHwmje91HLD60RSzNiSJNiRxVz3O07l5hP1rqBKMuGX62Yle7fyN1ZNwTzMk/44aklEMmlXYQBjC/4td171ES/goIYyo2W+WL0gD7Nqt8ywDNJM+kvmiiyUHbRqQozWHmWLcsbjs6kowb2wxUUYQSjv6WpMZrNQkRQ9slT3k+ot/Ggg6NgX8yFYpTlUAsutpXD
-  bmk_enabled: 'False'
-  ceph_enabled: 'False'
-  auditd_enabled: 'False'
-  cicd_control_node01_address: 10.167.13.91
-  cicd_control_node01_hostname: cid01
-  cicd_control_node02_address: 10.167.13.92
-  cicd_control_node02_hostname: cid02
-  cicd_control_node03_address: 10.167.13.93
-  cicd_control_node03_hostname: cid03
-  cicd_control_vip_address: 10.167.13.90
-  cicd_control_vip_hostname: cid
-  cicd_enabled: 'True'
-  cicd_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEpgIBAAKCAQEAxLQy4F7sNBloj0fFvklCq9+IX/BV5OBs6OtSBf6A+ztTs68i
-    ib5W6Tz/knh7wt2URB6uKJJBRBK+Oqj91ZNJxISewP2f5gX10WpjOAti+Fghkemt
-    kvyA8aUxX1wuAz7Y2v1uv1IkfWa5ubL8oJXNjeT9yeCNJWBxvd46XT9UiCs5CnDL
-    lBjRg+AP2+u5OabUFtH7GSzVqcMzhi0qLilP+cRhKmar2tQXFEI5wnwADh0REAF/
-    OxUZPaPEPD9TW7fGxjfrMtyUKqTEbi+EPsIvldkR0IhYrKXjwcFFu3FKISuy8PVM
-    EKUM5aZaLMI/WiMs1zmx+bAOrkCsUAf+sVmocQIDAQABAoIBAQCRnSAojrxmmQSZ
-    RLVy9wK+/zwSYXUEeMrG5iUTQOM0mCePVa/IrjnicYB0anKbv7IZd2jPqe1cuk9O
-    V3mJGH68Vry6+0XaX0EpJIuMmolKdNttC8Ktk/TUbciN4kxBpM2d14ybXvCaUGhe
-    usxfCGZhi0oAnxV9vNaWiqNEEjS+k4u9XTnj3+GxstEwch+l7xJwz83WEsx7G1Zz
-    3Yxg7mh2uRPVCOZGVdClciym+9WHHrcdYw/OJCsSFsT7+qgzptsvXBVxa6EuGaVY
-    Pba+UfOnYIKlBtka4i3zXGaCQF6t2FHw5WyUEmYm3iBYmrGBbEf+3665Kh4NQs0a
-    PV4eHlLdAoGBAO8nDgkTA4gi1gyFy2YBUFP2BignkKCZGHkD8qvBnOt1Rxm6USlZ
-    7GzAtU3nSd8ODzgOBI7+zd82yRqv2hEwP7xARhr0Nx1XvyaQtRlQ6tQnBgvqLDCG
-    n0qvWoBM+Yl6sTRGYavAMCaR7PuULUcZFNWk7m0fv4vqddGijgRsje37AoGBANKP
-    nN72BujsQIhdzAYS+u5/Hxu56Tvgupe6kWkhQuV8MQcM+79I6cgJxxH6zQDP/hGt
-    3vXapgWUgi025LuEUWfkxAtTUfT4cRP2x529CH/XLQMYVqWxkoben9r+eFav+Kgw
-    C0dR3vSOlEMzYoIF+p/km0mIV1ZKZvrWymtXSdODAoGBAL4feUwDfqpKr4pzD1l/
-    r+Gf1BM2KQdTzp3eYpzjJiIWMTkl4wIRyCBJL5nIRvT6E2VH153qubY7srLxnFZP
-    2kuJeXJSNkKwkHlTT3XZ22Zfw7HTL+BAFgDk2PjouPTvwlLBpUJKXr07A4CZs0kz
-    ilmybg340GmmMpY/OdIQjuDjAoGBAMcd5hP2hqxHlfMe+CwaUM+uySU4FvZ32xxW
-    4uGPAarwWZC4V20Zr3JqxKUdDjYhersPOFQ4c129hajqSz2EsFLWRAoNvegx9QUT
-    Dsv9EgeK3Vca8f14wf7mkjbPA8++UyABZvkH1BZiqpQuCI66xrnjvnG4DBde/qlg
-    60S84+SvAoGBAKH1feNtJaNhDxF0OqRuVmSFyL3pkMDoYr/mgpT4T1ToRBW5AtEt
-    Io4egi68ph8IChAt/TGFvikW7tbEgK9ACD/RAfl+LiuhxqJJFtC1LfGfHI7ntuRj
-    DjQrUy59ULoflh3iWBPtpw2ooRlSrAwaIgGt9odMECXp3BK8WLsUG9H1
-    -----END RSA PRIVATE KEY-----
-  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEtDLgXuw0GWiPR8W+SUKr34hf8FXk4Gzo61IF/oD7O1OzryKJvlbpPP+SeHvC3ZREHq4okkFEEr46qP3Vk0nEhJ7A/Z/mBfXRamM4C2L4WCGR6a2S/IDxpTFfXC4DPtja/W6/UiR9Zrm5svyglc2N5P3J4I0lYHG93jpdP1SIKzkKcMuUGNGD4A/b67k5ptQW0fsZLNWpwzOGLSouKU/5xGEqZqva1BcUQjnCfAAOHREQAX87FRk9o8Q8P1Nbt8bGN+sy3JQqpMRuL4Q+wi+V2RHQiFispePBwUW7cUohK7Lw9UwQpQzlploswj9aIyzXObH5sA6uQKxQB/6xWahx
-  cluster_domain: bm-mcp-k8s-contrail.local
-  cluster_name: bm-mcp-k8s-contrail
-  # compute_bond_mode: active-backup
-  compute_padding_with_zeros: 'True'
-  compute_primary_first_nic: eth1
-  compute_primary_second_nic: eth2
-  context_seed: GAZbu3hguubkeYMg15AQ1J6UuY60TAh8h0EVpNUrHWhjRS2SxRYBuxxLvVURv82m
-  control_network_netmask: 255.255.255.0
-  control_network_subnet: 10.167.13.0/24
-  control_vlan: '2410'
-  backend_vlan: '2402'
-  cookiecutter_template_branch: ''
-  cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
-  deploy_network_gateway: 172.17.41.2
-  deploy_network_netmask: 255.255.255.192
-  deploy_network_subnet: 172.17.41.0/26
-  deployment_type: physical
-  dns_server01: 172.18.208.44
-  dns_server02: 172.18.176.6
-  email_address: dtyzhnenko@mirantis.com
-  etcd_ssl: 'True'
-  infra_bond_mode: active-backup
-  infra_deploy_nic: eth0
-  infra_kvm01_control_address: 10.167.13.241
-  infra_kvm01_deploy_address: 172.17.41.4
-  infra_kvm01_hostname: kvm01
-  infra_kvm02_control_address: 10.167.13.242
-  infra_kvm02_deploy_address: 172.17.41.5
-  infra_kvm02_hostname: kvm02
-  infra_kvm03_control_address: 10.167.13.243
-  infra_kvm03_deploy_address: 172.17.41.6
-  infra_kvm03_hostname: kvm03
-  infra_kvm_vip_address: 10.167.13.240
-  infra_primary_first_nic: eth1
-  infra_primary_second_nic: eth2
-  internal_proxy_enabled: 'False'
-  kubernetes_enabled: 'True'
-  kubernetes_compute_count: 2
-  kubernetes_compute_rack01_hostname: cmp
-  kubernetes_compute_single_address_ranges: 10.167.13.103-10.167.13.104
-  kubernetes_compute_tenant_address_ranges: 10.167.14.103-10.167.14.104
-  kubernetes_network_opencontrail_enabled: 'True'
-  kubernetes_keepalived_vip_interface: br_ctl
-  kubernetes_metallb_enabled: 'False'  # Not used with opencontrail
-  metallb_addresses: 172.17.41.160-172.17.41.180
-  kubernetes_ingressnginx_enabled: 'True'
-  kubernetes_ingressnginx_controller_replicas: 2
-  local_repositories: 'False'
-  maas_enabled: 'True'
-  maas_deploy_address: 172.17.41.3
-  maas_deploy_cidr: 172.17.41.0/26
-  maas_deploy_gateway: 172.17.41.2
-  maas_deploy_range_end: 172.17.41.60
-  maas_deploy_range_start: 172.17.41.13
-  maas_deploy_vlan: '0'
-  maas_dhcp_enabled: 'True'
-  maas_fabric_name: fabric-0
-  maas_hostname: cfg01
-  maas_manage_deploy_network: 'True'
-  maas_machines: |
-        kvm01: # cz7784-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          # pxe_interface_mac:
-          pxe_interface_mac: "0c:c4:7a:6c:88:fe"
-          interfaces:
-            enp9s0f0:
-              mac: "0c:c4:7a:6c:88:fe"
-              mode: "static"
-              ip: "172.17.41.4"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.225.115"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        kvm02: # #cz7785-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:34:53:8e"
-          interfaces:
-            enp9s0f0:
-              mac: "0c:c4:7a:34:53:8e"
-              mode: "static"
-              ip: "172.17.41.5"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.225.121"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        kvm03: # #cz7744-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:34:55:2c"
-          interfaces:
-            enp9s0f0:
-              mac: "0c:c4:7a:34:55:2c"
-              mode: "static"
-              ip: "172.17.41.6"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.225.126"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        ctl01: # #cz7609.bud.mirantis.net
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:54:a2:9c"
-          interfaces:
-            enp2s0f0:
-              mac: "0c:c4:7a:54:a2:9c"
-              mode: "static"
-              ip: "172.17.41.9"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "185.8.59.239"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        ctl02: # #cz7631-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:54:a0:56"
-          interfaces:
-            enp2s0f0:
-              mac: "0c:c4:7a:54:a0:56"
-              mode: "static"
-              ip: "172.17.41.10"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "185.8.59.237"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        ctl03: # #cz7632-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:54:a0:08"
-          interfaces:
-            enp2s0f0:
-              mac: "0c:c4:7a:54:a0:08"
-              mode: "static"
-              ip: "172.17.41.11"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "185.8.59.236"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        cmp001: # #cz7781-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:6c:58:06"
-          interfaces:
-            enp9s0f0:
-              mac: "0c:c4:7a:6c:58:06"
-              mode: "static"
-              ip: "172.17.41.7"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.225.120"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        cmp002: # cz7674.bud.mirantis.net
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:33:27:22"
-          interfaces:
-            enp9s0f0:
-              mac: "0c:c4:7a:33:27:22"
-              mode: "static"
-              ip: "172.17.41.8"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "176.74.217.166"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-
-  mcp_common_scripts_branch: ''
-  mcp_version: proposed
-  offline_deployment: 'False'
-  kubernetes_control_address: 10.167.13.236
-  kubernetes_control_node01_deploy_address: 172.17.41.9
-  kubernetes_control_node01_tenant_address: 10.167.14.111
-  kubernetes_control_node01_address: 10.167.13.239
-  kubernetes_control_node01_hostname: ctl01
-  kubernetes_control_node02_deploy_address: 172.17.41.10
-  kubernetes_control_node02_tenant_address: 10.167.14.112
-  kubernetes_control_node02_address: 10.167.13.238
-  kubernetes_control_node02_hostname: ctl02
-  kubernetes_control_node03_deploy_address: 172.17.41.11
-  kubernetes_control_node03_tenant_address: 10.167.14.113
-  kubernetes_control_node03_address: 10.167.13.237
-  kubernetes_control_node03_hostname: ctl03
-  kubernetes_proxy_hostname: prx
-  kubernetes_proxy_node01_hostname: prx01
-  kubernetes_proxy_node02_hostname: prx02
-  kubernetes_proxy_address: 10.167.13.80
-  kubernetes_proxy_node01_address: 10.167.13.81
-  kubernetes_proxy_node02_address: 10.167.13.82
-  linux_repo_contrail_component: oc40
-  opencontrail_analytics_hostname: ctl
-  opencontrail_analytics_node01_hostname: ctl01
-  opencontrail_analytics_node02_hostname: ctl02
-  opencontrail_analytics_node03_hostname: ctl03
-  opencontrail_analytics_address: ${_param:opencontrail_control_address}
-  opencontrail_analytics_node01_address: ${_param:opencontrail_control_node01_address}
-  opencontrail_analytics_node02_address: ${_param:opencontrail_control_node02_address}
-  opencontrail_analytics_node03_address: ${_param:opencontrail_control_node03_address}
-  opencontrail_compute_iface_mask: '24'
-  opencontrail_compute_iface: ten2
-  opencontrail_control_address: 10.167.13.236
-  opencontrail_control_hostname: ctl
-  opencontrail_control_node01_address: 10.167.13.239
-  opencontrail_control_node01_hostname: ctl01
-  opencontrail_control_node02_address: 10.167.13.238
-  opencontrail_control_node02_hostname: ctl02
-  opencontrail_control_node03_address: 10.167.13.237
-  opencontrail_control_node03_hostname: ctl03
-  opencontrail_enabled: 'True'
-  opencontrail_router01_address: 10.167.13.220
-  opencontrail_router01_hostname: rtr01
-  opencontrail_router02_address: 10.167.13.221
-  opencontrail_router02_hostname: rtr02
-  opencontrail_public_ip_range: 172.17.41.128/26
-  opencontrail_version: '4.0'
-  openstack_enabled: 'False'
-  openssh_groups: ''
-  openstack_version: pike
-  openldap_domain: ${_param:cluster_name}.local
-  openldap_enabled: 'True'
-  openldap_organisation: ${_param:cluster_name}
-  oss_enabled: 'False'
-  oss_node03_address: ${_param:stacklight_monitor_node03_address}
-  oss_notification_smtp_use_tls: 'False'
-  oss_pushkin_email_sender_password: password
-  oss_pushkin_smtp_host: 127.0.0.1
-  oss_pushkin_smtp_port: '587'
-  oss_webhook_app_id: '24'
-  oss_webhook_login_id: '13'
-  platform: kubernetes_enabled
-  public_host: ${_param:infra_config_address}
-  publication_method: email
-  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
-  salt_api_password: frJMLJsfGkFXCk4qTTEttKla518Akvdp
-  salt_api_password_hash: $6$ixefPtcd$vasKuySO6L2JM0FKaB/udsQvH4upI2dWoJZaR9XTqeAx4UMvkHsNNSwsALVqTTLbXIngkFqYNXpbNm1o4iMGS.
-  salt_master_address: 10.167.13.15
-  salt_master_hostname: cfg01
-  salt_master_management_address: 172.17.41.3
-  shared_reclass_branch: ''
-  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
-  stacklight_enabled: 'True'
-  stacklight_log_address: 10.167.13.60
-  stacklight_log_hostname: log
-  stacklight_log_node01_address: 10.167.13.61
-  stacklight_log_node01_hostname: log01
-  stacklight_log_node02_address: 10.167.13.62
-  stacklight_log_node02_hostname: log02
-  stacklight_log_node03_address: 10.167.13.63
-  stacklight_log_node03_hostname: log03
-  stacklight_monitor_address: 10.167.13.70
-  stacklight_monitor_hostname: mon
-  stacklight_monitor_node01_address: 10.167.13.71
-  stacklight_monitor_node01_hostname: mon01
-  stacklight_monitor_node02_address: 10.167.13.72
-  stacklight_monitor_node02_hostname: mon02
-  stacklight_monitor_node03_address: 10.167.13.73
-  stacklight_monitor_node03_hostname: mon03
-  stacklight_telemetry_address: 10.167.13.85
-  stacklight_telemetry_hostname: mtr
-  stacklight_telemetry_node01_address: 10.167.13.86
-  stacklight_telemetry_node01_hostname: mtr01
-  stacklight_telemetry_node02_address: 10.167.13.87
-  stacklight_telemetry_node02_hostname: mtr02
-  stacklight_telemetry_node03_address: 10.167.13.88
-  stacklight_telemetry_node03_hostname: mtr03
-  stacklight_version: '2'
-  static_ips_on_deploy_network_enabled: 'False'
-  tenant_network_gateway: 10.167.14.253
-  tenant_network_netmask: 255.255.255.0
-  tenant_network_subnet: 10.167.14.0/24
-  tenant_vlan: '2411'
-  upstream_proxy_enabled: 'False'
-  use_default_network_scheme: 'True'
-  vnf_onboarding_enabled: 'False'
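Each network in the context above is declared twice, once as a CIDR subnet and once as a dotted netmask; a minimal sketch using Python's ipaddress module to verify that the pairs stay consistent (values copied from the context):

import ipaddress

pairs = {
    "control": ("10.167.13.0/24", "255.255.255.0"),
    "deploy": ("172.17.41.0/26", "255.255.255.192"),
    "tenant": ("10.167.14.0/24", "255.255.255.0"),
}
for name, (subnet, netmask) in pairs.items():
    net = ipaddress.ip_network(subnet)
    # A mismatch here would mean the model contradicts itself.
    assert str(net.netmask) == netmask, name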
diff --git a/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/salt-context-environment.yaml b/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/salt-context-environment.yaml
deleted file mode 100644
index e9a7dda..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/salt-context-environment.yaml
+++ /dev/null
@@ -1,154 +0,0 @@
-nodes:
-    mon01.bm-k8s-contrail.local:
-      reclass_storage_name: stacklight_server_node01
-      roles:
-      - stacklightv2_server_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mon02.bm-k8s-contrail.local:
-      reclass_storage_name: stacklight_server_node02
-      roles:
-      - stacklightv2_server
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mon03.bm-k8s-contrail.local:
-      reclass_storage_name: stacklight_server_node03
-      roles:
-      - stacklightv2_server
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mtr01.bm-k8s-contrail.local:
-      reclass_storage_name: stacklight_telemetry_node01
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mtr02.bm-k8s-contrail.local:
-      reclass_storage_name: stacklight_telemetry_node02
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mtr03.bm-k8s-contrail.local:
-      reclass_storage_name: stacklight_telemetry_node03
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    log01.bm-k8s-contrail.local:
-      reclass_storage_name: stacklight_log_node01
-      roles:
-      - stacklight_log_leader_v2
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    log02.bm-k8s-contrail.local:
-      reclass_storage_name: stacklight_log_node02
-      roles:
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    log03.bm-k8s-contrail.local:
-      reclass_storage_name: stacklight_log_node03
-      roles:
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cid01.bm-k8s-contrail.local:
-      reclass_storage_name: cicd_control_node01
-      roles:
-      - cicd_control_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cid02.bm-k8s-contrail.local:
-      reclass_storage_name: cicd_control_node02
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cid03.bm-k8s-contrail.local:
-      reclass_storage_name: cicd_control_node03
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    prx01:
-      reclass_storage_name: kubernetes_proxy_node01
-      roles:
-      - kubernetes_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    prx02:
-      reclass_storage_name: kubernetes_proxy_node02
-      roles:
-      - kubernetes_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
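Every node entry in this environment context follows the same shape: an FQDN key, a reclass storage name, a role list, and per-interface roles. A minimal sketch, assuming PyYAML and a local copy of the file, that groups hostnames by role for sanity-checking such inventories:

from collections import defaultdict

import yaml  # PyYAML, assumed available

with open("salt-context-environment.yaml") as f:
    inventory = yaml.safe_load(f)

by_role = defaultdict(list)
for host, spec in inventory["nodes"].items():
    for role in spec.get("roles", []):
        by_role[role].append(host)

# e.g. by_role['stacklight_telemetry'] lists mtr01..mtr03
print(dict(by_role))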
diff --git a/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/salt.yaml b/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/salt.yaml
deleted file mode 100644
index 4fe952a..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/salt.yaml
+++ /dev/null
@@ -1,82 +0,0 @@
-{% from 'cookied-cicd-bm-k8s-contrail40-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-cicd-bm-k8s-contrail40-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-cicd-bm-k8s-contrail40-maas/underlay.yaml' import DOMAIN_NAME with context %}
-
-# See shared-salt.yaml for other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-- description: Wait until salt-master is ready after configdrive user-data
-  cmd: |
-    timeout 120 salt-call test.ping
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: Prepare MAAS
-  cmd: |
-    salt-call state.sls maas.cluster;
-    salt-call state.sls maas.region;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Generate a public key for machines in MAAS
-  cmd: |
-    ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
-    maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Run commissioning of BM nodes
-  cmd: |
-    salt-call maas.process_machines
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Wait for machines ready
-  cmd: |
-    salt-call maas.machines_status && timeout 120 salt-call state.sls maas.machines.wait_for_ready
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 7, delay: 5}
-  skip_fail: false
-
-- description: Enforce the interfaces configuration defined in the model for servers
-  cmd: |
-    salt-call state.sls maas.machines.assign_ip;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Remove all the salt-minions and re-register the cfg01 minion
-  cmd: |
-    salt-key -y -D;
-    salt-call test.ping
-    sleep 5
-    # Check that the cfg01 is registered
-    salt-key | grep cfg01
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Provision the automatically commissioned physical nodes through MAAS
-  cmd: |
-    salt-call maas.deploy_machines;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-- description: Wait for machines deployed
-  cmd: |
-    salt-call maas.machines_status && timeout 300 salt-call state.sls maas.machines.wait_for_deployed
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 6, delay: 5}
-  skip_fail: false
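All deploy templates in this suite share the same step shape: description, cmd, node_name, retry {count, delay}, skip_fail. A minimal sketch of how one such step could be retried; the run_step runner is hypothetical, not the executor this repo actually uses:

import subprocess
import time

def run_step(step: dict) -> None:
    # Retry the shell command up to retry['count'] times, pausing
    # retry['delay'] seconds between attempts, as the step schema implies.
    retry = step.get("retry", {"count": 1, "delay": 0})
    for _ in range(retry["count"]):
        if subprocess.run(step["cmd"], shell=True).returncode == 0:
            return
        time.sleep(retry["delay"])
    if not step.get("skip_fail", False):
        raise RuntimeError("step failed: " + step["description"])

run_step({
    "description": "Wait for machines deployed",
    "cmd": "salt-call maas.machines_status",
    "retry": {"count": 6, "delay": 5},
    "skip_fail": False,
})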
diff --git a/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/underlay.yaml b/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/underlay.yaml
deleted file mode 100644
index e40b708..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-k8s-contrail40-maas/underlay.yaml
+++ /dev/null
@@ -1,129 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-
-#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-bm-k8s-contrail40-maas') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
-
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.17.41.3') %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
-
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'cookied-cicd-bm-k8s-contrail40-maas_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.17.41.0/26:26') }}
-        params:
-          ip_reserved:
-            gateway: '172.17.41.2'
-            l2_network_device: +61
-            default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
-          ip_ranges:
-            dhcp: [+2, -3]
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.13.0/24:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-          ip_ranges:
-            dhcp: [+2, -3]
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.14.0/24:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-          ip_ranges:
-            dhcp: [+2, -3]
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.41.128/26:26') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: -2
-          ip_ranges:
-            dhcp: [+2, -3]
-
-    groups:
-
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-
-        network_pools:
-          admin: admin-pool01
-
-        l2_network_devices:
-          admin:
-            address_pool: admin-pool01
-            dhcp: false
-            forward:
-              mode: bridge
-            parent_iface:
-              phys_dev: !os_env LAB_MANAGEMENT_IFACE
-          private:
-            address_pool: private-pool01
-            dhcp: false
-            forward:
-              mode: bridge
-            parent_iface:
-              phys_dev: !os_env LAB_CONTROL_IFACE
-
-        group_volumes:
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 16384
-              boot:
-                - hd
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: config
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
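In the address_pools above, ip_reserved and ip_ranges take signed offsets relative to the pool network: +N counts forward from the network address, -N counts back from the broadcast address. A minimal sketch of that arithmetic with ipaddress; the resolve helper is an illustration, not the devops implementation:

import ipaddress

def resolve(net: str, offset: int) -> str:
    # +N from the network address, -N back from the broadcast address.
    network = ipaddress.ip_network(net)
    base = network.network_address if offset >= 0 else network.broadcast_address
    return str(base + offset)

print(resolve("172.17.41.0/26", 61))  # l2_network_device: +61 -> 172.17.41.61
print(resolve("172.17.41.0/26", -3))  # dhcp range end: -3 -> 172.17.41.60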
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/lab04-physical-inventory.yaml
deleted file mode 100644
index e2788ce..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/lab04-physical-inventory.yaml
+++ /dev/null
@@ -1,126 +0,0 @@
-nodes:
-    cfg01.cookied-cicd-bm-os-contrail40-maas.local:
-      reclass_storage_name: infra_config_node01
-      roles:
-      - infra_config
-      - linux_system_codename_xenial
-      - features_runtest_cfg
-      interfaces:
-        ens3:
-          role: single_static_mgm
-        ens4:
-          role: single_static_ctl
-    # Physical nodes
-
-    kvm01.cookied-cicd-bm-os-contrail40-maas.local:
-      reclass_storage_name: infra_kvm_node01
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        enp8s0f0:
-          role: single_mgm_dhcp
-        ens4f1:
-          role: bond_ctl_contrail_lacp
-        ens11f1:
-          role: bond_ctl_contrail_lacp
-
-    kvm02.cookied-cicd-bm-os-contrail40-maas.local:
-      reclass_storage_name: infra_kvm_node02
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        enp8s0f0:
-          role: single_mgm_dhcp
-        ens4f1:
-          role: bond_ctl_contrail_lacp
-        ens11f1:
-          role: bond_ctl_contrail_lacp
-
-    kvm03.cookied-cicd-bm-os-contrail40-maas.local:
-      reclass_storage_name: infra_kvm_node03
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        enp8s0f0:
-          role: single_mgm_dhcp
-        ens4f1:
-          role: bond_ctl_contrail_lacp
-        ens11f1:
-          role: bond_ctl_contrail_lacp
-
-    kvm04.cookied-cicd-bm-os-contrail40-maas.local:
-      reclass_storage_name: infra_kvm_node04
-      roles:
-      - infra_kvm_wo_gluster
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        enp8s0f0:
-          role: single_mgm_dhcp
-        ens4f1:
-          role: bond_ctl_contrail_lacp
-        ens11f1:
-          role: bond_ctl_contrail_lacp
-
-    kvm05.cookied-cicd-bm-os-contrail40-maas.local:
-      reclass_storage_name: infra_kvm_node05
-      roles:
-      - infra_kvm_wo_gluster
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        enp8s0f0:
-          role: single_mgm_dhcp
-        ens4f1:
-          role: bond_ctl_contrail_lacp
-        ens11f1:
-          role: bond_ctl_contrail_lacp
-
-    kvm06.cookied-cicd-bm-os-contrail40-maas.local:
-      reclass_storage_name: infra_kvm_node06
-      roles:
-      - infra_kvm_wo_gluster
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        enp8s0f0:
-          role: single_mgm_dhcp
-        ens4f1:
-          role: bond_ctl_contrail_lacp
-        ens11f1:
-          role: bond_ctl_contrail_lacp
-
-    osd<<count>>:
-      reclass_storage_name: ceph_osd_rack01
-      roles:
-      - ceph_osd
-      - linux_system_codename_xenial
-      interfaces:
-        eno1:
-          role: single_dhcp
-        ens1f1:
-          role: bond_ctl_contrail_lacp
-        ens2f1:
-          role: bond_ctl_contrail_lacp
-#          role: bond0_ab_vlan_ceph_storage_backend
-# todo: add storage net for ceph to second lacp bond
-
-    cmp<<count>>:
-      reclass_storage_name: openstack_compute_rack01
-      roles:
-      - openstack_compute
-      - linux_system_codename_xenial
-      interfaces:
-        eno1:
-          role: single_dhcp
-        ens1f0:
-          role: bond_ctl_contrail_lacp
-        ens1f1:
-          role: bond_contrail_lacp
-        ens2f0:
-          role: bond_ctl_contrail_lacp
-        ens2f1:
-          role: bond_contrail_lacp
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-cookiecutter-contrail.yaml
deleted file mode 100644
index 5116cd7..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-cookiecutter-contrail.yaml
+++ /dev/null
@@ -1,504 +0,0 @@
-default_context:
-  backup_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
-    +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
-    qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
-    m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
-    7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
-    2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
-    HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
-    AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
-    o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
-    5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
-    XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
-    AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
-    USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
-    uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
-    QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
-    98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
-    r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
-    qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
-    CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
-    p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
-    79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
-    NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
-    CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
-    XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
-    N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
-    -----END RSA PRIVATE KEY-----
-  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
-  bmk_enabled: 'False'
-  cicd_control_node01_address: 10.167.8.91
-  cicd_control_node01_hostname: cid01
-  cicd_control_node02_address: 10.167.8.92
-  cicd_control_node02_hostname: cid02
-  cicd_control_node03_address: 10.167.8.93
-  cicd_control_node03_hostname: cid03
-  cicd_control_vip_address: 10.167.8.90
-  cicd_control_vip_hostname: cid
-  cicd_enabled: 'True'
-  cicd_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
-    oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
-    IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
-    kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
-    wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
-    27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
-    5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
-    lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
-    k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
-    3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
-    dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
-    0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
-    qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
-    BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
-    UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
-    VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
-    1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
-    nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
-    Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
-    FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
-    HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
-    Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
-    poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
-    17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
-    l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
-    -----END RSA PRIVATE KEY-----
-  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
-  cluster_domain: cookied-bm-4.0-contrail.local
-  cluster_name: cookied-bm-4.0-contrail
-  opencontrail_version: 4.1
-  linux_repo_contrail_component: oc41
-  compute_bond_mode: active-backup
-  compute_padding_with_zeros: 'True'
-  compute_primary_first_nic: eth1
-  compute_primary_second_nic: eth2
-  context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
-  control_network_netmask: 255.255.255.0
-  control_network_subnet: 10.167.8.0/24
-  control_vlan: '2422'
-  tenant_vlan: '2423'
-  backend_vlan: '2424'
-  storage_vlan: '2425'  # not implemented yet, placeholder
-  cookiecutter_template_branch: ''
-  cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
-  deploy_network_gateway: 172.16.49.65
-  deploy_network_netmask: 255.255.255.192
-  deploy_network_subnet: 172.16.49.64/26
-  deployment_type: physical
-  dns_server01: 172.18.208.44
-  dns_server02: 172.18.176.6
-  email_address: sgudz@mirantis.com
-  infra_bond_mode: active-backup
-  infra_deploy_nic: eth0
-  infra_kvm01_control_address: 10.167.8.241
-  infra_kvm01_deploy_address: 172.16.49.67
-  infra_kvm01_hostname: kvm01
-  infra_kvm02_control_address: 10.167.8.242
-  infra_kvm02_deploy_address: 172.16.49.68
-  infra_kvm02_hostname: kvm02
-  infra_kvm03_control_address: 10.167.8.243
-  infra_kvm03_deploy_address: 172.16.49.69
-  infra_kvm03_hostname: kvm03
-  infra_kvm04_control_address: 10.167.8.244
-  infra_kvm04_deploy_address: 172.16.49.70
-  infra_kvm04_hostname: kvm04
-  infra_kvm05_control_address: 10.167.8.245
-  infra_kvm05_deploy_address: 172.16.49.71
-  infra_kvm05_hostname: kvm05
-  infra_kvm06_control_address: 10.167.8.246
-  infra_kvm06_deploy_address: 172.16.49.72
-  infra_kvm06_hostname: kvm06
-  infra_kvm_vip_address: 10.167.8.240
-  infra_primary_first_nic: eth1
-  infra_primary_second_nic: eth2
-  internal_proxy_enabled: 'False'
-  kqueen_custom_mail_enabled: 'False'
-  kqueen_enabled: 'False'
-  kubernetes_enabled: 'False'
-  local_repositories: 'False'
-  maas_enabled: 'True'
-  maas_deploy_address: 172.16.49.66
-  maas_deploy_cidr: 172.16.49.64/26
-  maas_deploy_gateway: 172.16.49.65
-  maas_deploy_range_end: 172.16.49.119
-  maas_deploy_range_start: 172.16.49.78
-  maas_deploy_vlan: '0'
-  maas_dhcp_enabled: 'True'
-  maas_fabric_name: fabric-0
-  maas_hostname: cfg01
-  maas_manage_deploy_network: 'True'
-  maas_machines: |
-        kvm01: # cz8062-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          # pxe_interface_mac:
-          pxe_interface_mac: "0c:c4:7a:a8:d3:44"
-          interfaces:
-            enp8s0f0:
-              mac: "0c:c4:7a:a8:d3:44"
-              mode: "static"
-              ip: "172.16.49.67"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.227.106"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        kvm02: # #cz8063-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:a8:b8:18"
-          interfaces:
-            enp8s0f0:
-              mac: "0c:c4:7a:a8:b8:18"
-              mode: "static"
-              ip: "172.16.49.68"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.227.107"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        kvm03: # #cz8064-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:a8:d0:40"
-          interfaces:
-            enp8s0f0:
-              mac: "0c:c4:7a:a8:d0:40"
-              mode: "static"
-              ip: "172.16.49.69"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.227.108"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        kvm04: # cz8065-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          # pxe_interface_mac:
-          pxe_interface_mac: "0c:c4:7a:a8:b8:22"
-          interfaces:
-            enp8s0f0:
-              mac: "0c:c4:7a:a8:b8:22"
-              mode: "static"
-              ip: "172.16.49.70"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.227.110"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        kvm05: # #cz8066-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:a8:b8:1a"
-          interfaces:
-            enp8s0f0:
-              mac: "0c:c4:7a:a8:b8:1a"
-              mode: "static"
-              ip: "172.16.49.71"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.227.111"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        kvm06: # #cz8067-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:a8:b8:1c"
-          interfaces:
-            enp8s0f0:
-              mac: "0c:c4:7a:a8:b8:1c"
-              mode: "static"
-              ip: "172.16.49.72"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.227.112"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        osd001: # #cz5272-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:aa:51:f8"
-          interfaces:
-            eno1:
-              mac: "0c:c4:7a:aa:51:f8"
-              mode: "static"
-              ip: "172.16.49.73"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.225.182"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        osd002: # #cz7857-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:6d:3a:80"
-          interfaces:
-            eno1:
-              mac: "0c:c4:7a:6d:3a:80"
-              mode: "static"
-              ip: "172.16.49.74"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.225.199"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        osd003: # #cz7787-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:6b:f7:7a"
-          interfaces:
-            eno1:
-              mac: "0c:c4:7a:6b:f7:7a"
-              mode: "static"
-              ip: "172.16.49.75"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.225.123"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        cmp001: # #cz7987-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:a8:72:ac"
-          interfaces:
-            eno1:
-              mac: "0c:c4:7a:a8:72:ac"
-              mode: "static"
-              ip: "172.16.49.76"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.225.181"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        cmp002: # cz7842-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:6d:3a:c6"
-          interfaces:
-            eno1:
-              mac: "0c:c4:7a:6d:3a:c6"
-              mode: "static"
-              ip: "172.16.49.77"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.225.201"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-
-  mcp_common_scripts_branch: ''
-  mcp_version: proposed
-  offline_deployment: 'False'
-  opencontrail_analytics_address: 10.167.8.30
-  opencontrail_analytics_hostname: nal
-  opencontrail_analytics_node01_address: 10.167.8.31
-  opencontrail_analytics_node01_hostname: nal01
-  opencontrail_analytics_node02_address: 10.167.8.32
-  opencontrail_analytics_node02_hostname: nal02
-  opencontrail_analytics_node03_address: 10.167.8.33
-  opencontrail_analytics_node03_hostname: nal03
-  opencontrail_compute_iface_mask: '24'
-  opencontrail_control_address: 10.167.8.20
-  opencontrail_control_hostname: ntw
-  opencontrail_control_node01_address: 10.167.8.21
-  opencontrail_control_node01_hostname: ntw01
-  opencontrail_control_node02_address: 10.167.8.22
-  opencontrail_control_node02_hostname: ntw02
-  opencontrail_control_node03_address: 10.167.8.23
-  opencontrail_control_node03_hostname: ntw03
-  opencontrail_enabled: 'True'
-  opencontrail_router01_address: 10.167.8.220
-  opencontrail_router01_hostname: rtr01
-  opencontrail_router02_address: 10.167.8.101
-  opencontrail_router02_hostname: rtr02
-  openssh_groups: ''
-  openstack_benchmark_node01_address: 10.167.8.95
-  openstack_benchmark_node01_hostname: bmk01
-  openstack_cluster_size: compact
-  openstack_compute_count: '2'
-  openstack_compute_rack01_hostname: cmp
-  openstack_compute_single_address_ranges: 10.167.8.101-10.167.8.102
-  openstack_compute_deploy_address_ranges: 172.16.49.76-172.16.49.77
-  openstack_compute_tenant_address_ranges: 10.167.10.101-10.167.10.102
-  openstack_compute_backend_address_ranges: 10.167.10.101-10.167.10.102
-  openstack_control_address: 10.167.8.10
-  openstack_control_hostname: ctl
-  openstack_control_node01_address: 10.167.8.11
-  openstack_control_node01_hostname: ctl01
-  openstack_control_node02_address: 10.167.8.12
-  openstack_control_node02_hostname: ctl02
-  openstack_control_node03_address: 10.167.8.13
-  openstack_control_node03_hostname: ctl03
-  openstack_database_address: 10.167.8.50
-  openstack_database_hostname: dbs
-  openstack_database_node01_address: 10.167.8.51
-  openstack_database_node01_hostname: dbs01
-  openstack_database_node02_address: 10.167.8.52
-  openstack_database_node02_hostname: dbs02
-  openstack_database_node03_address: 10.167.8.53
-  openstack_database_node03_hostname: dbs03
-  openstack_enabled: 'True'
-  openstack_message_queue_address: 10.167.8.40
-  openstack_message_queue_hostname: msg
-  openstack_message_queue_node01_address: 10.167.8.41
-  openstack_message_queue_node01_hostname: msg01
-  openstack_message_queue_node02_address: 10.167.8.42
-  openstack_message_queue_node02_hostname: msg02
-  openstack_message_queue_node03_address: 10.167.8.43
-  openstack_message_queue_node03_hostname: msg03
-  openstack_network_engine: opencontrail
-  openstack_neutron_bgp_vpn: 'False'
-  openstack_neutron_bgp_vpn_driver: bagpipe
-  openstack_nfv_dpdk_enabled: 'False'
-  openstack_nfv_sriov_enabled: 'False'
-  openstack_nova_compute_nfv_req_enabled: 'False'
-  openstack_nova_compute_reserved_host_memory_mb: '900'
-  openstack_proxy_address: 10.167.8.80
-  openstack_proxy_hostname: prx
-  openstack_proxy_node01_address: 10.167.8.81
-  openstack_proxy_node01_hostname: prx01
-  openstack_proxy_node02_address: 10.167.8.82
-  openstack_proxy_node02_hostname: prx02
-  openstack_upgrade_node01_address: 10.167.8.19
-  openstack_version: pike
-  oss_enabled: 'False'
-  oss_node03_address: ${_param:stacklight_monitor_node03_address}
-  oss_webhook_app_id: '24'
-  oss_webhook_login_id: '13'
-  platform: openstack_enabled
-  public_host: ${_param:openstack_proxy_address}
-  publication_method: email
-  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
-  salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
-  salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
-  salt_master_address: 10.167.8.15
-  salt_master_hostname: cfg01
-  salt_master_management_address: 172.16.49.66
-  shared_reclass_branch: ''
-  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
-  stacklight_enabled: 'True'
-  stacklight_log_address: 10.167.8.60
-  stacklight_log_hostname: log
-  stacklight_log_node01_address: 10.167.8.61
-  stacklight_log_node01_hostname: log01
-  stacklight_log_node02_address: 10.167.8.62
-  stacklight_log_node02_hostname: log02
-  stacklight_log_node03_address: 10.167.8.63
-  stacklight_log_node03_hostname: log03
-  stacklight_long_term_storage_type: prometheus
-  stacklight_monitor_address: 10.167.8.70
-  stacklight_monitor_hostname: mon
-  stacklight_monitor_node01_address: 10.167.8.71
-  stacklight_monitor_node01_hostname: mon01
-  stacklight_monitor_node02_address: 10.167.8.72
-  stacklight_monitor_node02_hostname: mon02
-  stacklight_monitor_node03_address: 10.167.8.73
-  stacklight_monitor_node03_hostname: mon03
-  stacklight_telemetry_address: 10.167.8.85
-  stacklight_telemetry_hostname: mtr
-  stacklight_telemetry_node01_address: 10.167.8.86
-  stacklight_telemetry_node01_hostname: mtr01
-  stacklight_telemetry_node02_address: 10.167.8.87
-  stacklight_telemetry_node02_hostname: mtr02
-  stacklight_telemetry_node03_address: 10.167.8.88
-  stacklight_telemetry_node03_hostname: mtr03
-  stacklight_version: '2'
-  static_ips_on_deploy_network_enabled: 'False'
-  tenant_network_gateway: 10.167.10.253
-  tenant_network_netmask: 255.255.255.0
-  tenant_network_subnet: 10.167.10.0/24
-  upstream_proxy_enabled: 'False'
-  use_default_network_scheme: 'True'
-  openldap_domain: cookied-bm-4.0-contrail.local
-  openldap_enabled: 'True'
-  openldap_organisation: ${_param:cluster_name}
-  ceph_enabled: 'True'
-  ceph_version: "luminous"
-  ceph_hyper_converged: "False"
-  ceph_osd_backend: "bluestore"
-  ceph_osd_count: "3"
-  ceph_osd_node_count: 3
-  ceph_osd_block_db_size: 20
-  ceph_osd_journal_size: 20
-  ceph_osd_bond_mode: "active-backup"
-  ceph_osd_data_partition_prefix: ""
-
-  ceph_public_network_allocation: storage
-  ceph_public_network: "10.167.8.0/24"
-  ceph_cluster_network: "10.167.8.0/24"
-
-# for 2018.11.0+
-  ceph_osd_single_address_ranges: "10.167.8.200-10.167.8.202"
-  ceph_osd_deploy_address_ranges: "172.16.49.73-172.16.49.75"
-  ceph_osd_storage_address_ranges: "10.167.8.200-10.167.8.202"
-  ceph_osd_backend_address_ranges: "10.167.10.200-10.167.10.202"
-
-  ceph_osd_data_disks: "/dev/sdb"
-  ceph_osd_journal_or_block_db_disks: "/dev/sdb"
-  ceph_osd_mode: "separated"
-  ceph_osd_deploy_nic: "eth0"
-  ceph_osd_primary_first_nic: "eth1"
-  ceph_osd_primary_second_nic: "eth2"
-  #ceph_mon_node01_address: "172.16.47.66"
-  #ceph_mon_node01_deploy_address: "172.16.48.66"
-  ceph_mon_node01_address: "10.167.8.66"
-  ceph_mon_node01_hostname: "cmn01"
-  #ceph_mon_node02_address: "172.16.47.67"
-  #ceph_mon_node02_deploy_address: "172.16.48.67"
-  ceph_mon_node02_address: "10.167.8.67"
-  ceph_mon_node02_hostname: "cmn02"
-  #ceph_mon_node03_address: "172.16.47.68"
-  #ceph_mon_node03_deploy_address: "172.16.48.68"
-  ceph_mon_node03_address: "10.167.8.68"
-  ceph_mon_node03_hostname: "cmn03"
-  #ceph_rgw_address: "172.16.47.75"
-  ceph_rgw_address: "10.167.8.75"
-  #ceph_rgw_node01_address: "172.16.47.76"
-  #ceph_rgw_node01_deploy_address: "172.16.48.76"
-  ceph_rgw_node01_address: "10.167.8.76"
-  ceph_rgw_node01_hostname: "rgw01"
-  #ceph_rgw_node02_address: "172.16.47.77"
-  #ceph_rgw_node02_deploy_address: "172.16.48.77"
-  ceph_rgw_node02_address: "10.167.8.77"
-  ceph_rgw_node02_hostname: "rgw02"
-  #ceph_rgw_node03_address: "172.16.47.78"
-  #ceph_rgw_node03_deploy_address: "172.16.48.78"
-  ceph_rgw_node03_address: "10.167.8.78"
-  ceph_rgw_node03_hostname: "rgw03"
-  manila_enabled: 'False'
-  barbican_enabled: 'False'
-  barbican_integration_enabled: 'False'
-  # SSL settings
-  nova_vnc_tls_enabled: 'True'
-  galera_ssl_enabled: 'True'
-  openstack_mysql_x509_enabled: 'True'
-  rabbitmq_ssl_enabled: 'True'
-  openstack_rabbitmq_x509_enabled: 'True'
-  openstack_internal_protocol: 'https'
-  cinder_backup_engine: 'ceph'
-  cinder_ceph_backup_pool_name: 'backups'
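Note: the ==IPMI_USER== and ==IPMI_PASS== markers in the maas_machines block above are placeholders, not literal credentials; they have to be substituted with the real BMC account before the context is rendered. A minimal sketch of that step, assuming the credentials are exported as IPMI_USER/IPMI_PASS and CONTEXT_FILE points at the context file (all three names are illustrative):

    # Inject BMC credentials into the cookiecutter context (illustrative names).
    sed -i "s/==IPMI_USER==/${IPMI_USER}/g; s/==IPMI_PASS==/${IPMI_PASS}/g" "${CONTEXT_FILE}"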
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-environment.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-environment.yaml
deleted file mode 100644
index 5db9637..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-environment.yaml
+++ /dev/null
@@ -1,397 +0,0 @@
-nodes:
-    # Virtual Control Plane nodes
-    cid01.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: cicd_control_node01
-      roles:
-      - cicd_control_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cid02.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: cicd_control_node02
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cid03.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: cicd_control_node03
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    ctl01.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: openstack_control_node01
-      roles:
-      - openstack_control_leader
-      - linux_system_codename_xenial
-      classes:
-      - system.linux.system.repo.mcp.apt_mirantis.docker
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    ctl02.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: openstack_control_node02
-      roles:
-      - openstack_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    ctl03.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: openstack_control_node03
-      roles:
-      - openstack_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    dbs01.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: openstack_database_node01
-      roles:
-      - openstack_database_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    dbs02.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: openstack_database_node02
-      roles:
-      - openstack_database
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    dbs03.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: openstack_database_node03
-      roles:
-      - openstack_database
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    msg01.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: openstack_message_queue_node01
-      roles:
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    msg02.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: openstack_message_queue_node02
-      roles:
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    msg03.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: openstack_message_queue_node03
-      roles:
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    prx01.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: openstack_proxy_node01
-      roles:
-      - openstack_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    prx02.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: openstack_proxy_node02
-      roles:
-      - openstack_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mon01.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: stacklight_server_node01
-      roles:
-      - stacklightv2_server_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mon02.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: stacklight_server_node02
-      roles:
-      - stacklightv2_server
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mon03.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: stacklight_server_node03
-      roles:
-      - stacklightv2_server
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    nal01.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: opencontrail_analytics_node01
-      roles:
-      - opencontrail_analytics
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    nal02.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: opencontrail_analytics_node02
-      roles:
-      - opencontrail_analytics
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    nal03.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: opencontrail_analytics_node03
-      roles:
-      - opencontrail_analytics
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    ntw01.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: opencontrail_control_node01
-      roles:
-      - opencontrail_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    ntw02.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: opencontrail_control_node02
-      roles:
-      - opencontrail_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    ntw03.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: opencontrail_control_node03
-      roles:
-      - opencontrail_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mtr01.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: stacklight_telemetry_node01
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mtr02.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: stacklight_telemetry_node02
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mtr03.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: stacklight_telemetry_node03
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    log01.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: stacklight_log_node01
-      roles:
-      - stacklight_log_leader_v2
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    log02.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: stacklight_log_node02
-      roles:
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    log03.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: stacklight_log_node03
-      roles:
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cmn01.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: ceph_mon_node01
-      roles:
-      - ceph_mon
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cmn02.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: ceph_mon_node02
-      roles:
-      - ceph_mon
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cmn03.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: ceph_mon_node03
-      roles:
-      - ceph_mon
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    rgw01.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: ceph_rgw_node01
-      roles:
-      - ceph_rgw
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    rgw02.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: ceph_rgw_node02
-      roles:
-      - ceph_rgw
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    rgw03.cookied-bm-mcp-ocata-contrail.local:
-      reclass_storage_name: ceph_rgw_node03
-      roles:
-      - ceph_rgw
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-#    bmk01.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: openstack_benchmark_node01
-#      roles:
-#      - openstack_benchmark
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
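The environment file above only declares the hostname-to-reclass mapping, roles and per-interface roles; nothing in it validates itself. One quick check after the model is generated (a sketch, assuming reclass is installed on cfg01 and the model lives under /srv/salt/reclass) is to render the full inventory, which fails on unresolved parameters or missing classes:

    # Render the whole reclass inventory; a non-zero exit means a broken model.
    reclass --basedir /srv/salt/reclass --inventory > /dev/null && echo "model OK"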

diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt.yaml
deleted file mode 100644
index a7b3677..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-{% from 'cookied-cicd-bm-os-contrail40-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-cicd-bm-os-contrail40-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-cicd-bm-os-contrail40-maas/underlay.yaml' import DOMAIN_NAME with context %}
-
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-- description: Wait for the salt-master to be ready after configdrive user-data
-  cmd: |
-    timeout 120 salt-call test.ping
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: Generate a public key for machines in MAAS
-  cmd: |
-    ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
-    maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Run commissioning of BM nodes
-  cmd: |
-    salt-call maas.process_machines
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Wait for machines ready
-  cmd: |
-    salt-call maas.machines_status && timeout 120 salt-call state.sls maas.machines.wait_for_ready
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 7, delay: 5}
-  skip_fail: false
-
-- description: Enforce the interface configuration defined in the model for the servers
-  cmd: |
-    salt-call state.sls maas.machines.assign_ip;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Remove all the salt-minions and re-register the cfg01 minion
-  cmd: |
-    salt-key -y -D;
-    salt-call test.ping
-    sleep 5
-    # Check that the cfg01 is registered
-    salt-key | grep cfg01
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Provision the automatically commissioned physical nodes through MAAS
-  cmd: |
-    salt-call maas.deploy_machines;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-- description: Wait for machines deployed
-  cmd: |
-    salt-call maas.machines_status && timeout 300 salt-call state.sls maas.machines.wait_for_deployed
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 6, delay: 5}
-  skip_fail: false
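For reference, the provisioning sequence this salt.yaml encoded boils down to five salt-call invocations on cfg01 (retries and status polling omitted):

    salt-call maas.process_machines                      # commission the BM nodes
    salt-call state.sls maas.machines.wait_for_ready     # block until 'Ready'
    salt-call state.sls maas.machines.assign_ip          # enforce the model IPs
    salt-call maas.deploy_machines                       # install the OS
    salt-call state.sls maas.machines.wait_for_deployed  # block until 'Deployed'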
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/underlay.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/underlay.yaml
deleted file mode 100644
index e48b817..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/underlay.yaml
+++ /dev/null
@@ -1,129 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-
-#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-bm-os-contrail40-maas') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
-
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
-
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'cookied-cicd-bm-os-contrail40-maas_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
-        params:
-          ip_reserved:
-            gateway: '172.16.49.65'
-            l2_network_device: +61
-            default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
-          ip_ranges:
-            dhcp: [+2, -3]
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-          ip_ranges:
-            dhcp: [+2, -3]
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-          ip_ranges:
-            dhcp: [+2, -3]
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: -2
-          ip_ranges:
-            dhcp: [+2, -3]
-
-    groups:
-
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-
-        network_pools:
-          admin: admin-pool01
-
-        l2_network_devices:
-          admin:
-            address_pool: admin-pool01
-            dhcp: false
-            forward:
-              mode: bridge
-            parent_iface:
-              phys_dev: !os_env LAB_MANAGEMENT_IFACE
-          private:
-            address_pool: private-pool01
-            dhcp: false
-            forward:
-              mode: bridge
-            parent_iface:
-              phys_dev: !os_env LAB_CONTROL_IFACE
-
-        group_volumes:
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 16384
-              boot:
-                - hd
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: config
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
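Every tunable in this underlay goes through os_env(), so all of them could be overridden from the shell before the environment was created; for example (values are illustrative):

    export ENV_NAME=cookied-cicd-bm-os-contrail40-maas_proposed_42
    export LAB_MANAGEMENT_IFACE=enp1s0f0   # parent iface for the bridged admin net
    export LAB_CONTROL_IFACE=enp1s0f1      # parent iface for the bridged private net
    export IMAGE_PATH_CFG01_DAY01=/srv/images/cfg01-day01.qcow2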
diff --git a/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/lab04-physical-inventory.yaml
deleted file mode 100644
index 1952ac8..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/lab04-physical-inventory.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-nodes:
-    cfg01.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: infra_config_node01
-      roles:
-      - infra_config
-      - linux_system_codename_xenial
-      - features_runtest_cfg
-      interfaces:
-        ens3:
-          role: single_static_mgm
-        ens4:
-          role: single_static_ctl
-    # Physical nodes
-
-    kvm01.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: infra_kvm_node01
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        enp9s0f0:
-          role: single_dhcp
-        enp9s0f1:
-          role: bond0_ab_ovs_vlan_ctl
-        ens11f0:
-          role: single_mgm_manual
-
-    kvm02.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: infra_kvm_node02
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        enp9s0f0:
-          role: single_dhcp
-        enp9s0f1:
-          role: bond0_ab_ovs_vlan_ctl
-        ens11f0:
-          role: single_mgm_manual
-
-    kvm03.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: infra_kvm_node03
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        enp9s0f0:
-          role: single_dhcp
-        enp9s0f1:
-          role: bond0_ab_ovs_vlan_ctl
-        ens11f0:
-          role: single_mgm_manual
-
-    osd<<count>>:
-      reclass_storage_name: ceph_osd_rack01
-      roles:
-      - ceph_osd
-      - linux_system_codename_xenial
-      interfaces:
-        enp2s0f0:
-          role: single_dhcp
-        enp2s0f1:
-          role: single_vlan_ctl
-#          role: bond0_ab_vlan_ceph_storage_backend
-
-    cmp<<count>>:
-      reclass_storage_name: openstack_compute_rack01
-      roles:
-      - openstack_compute
-      - linux_system_codename_xenial
-      interfaces:
-        enp2s0f1:
-          role: single_dhcp
-        enp5s0f0:
-          role: bond0_ab_contrail
-        enp5s0f1:
-          role: single_vlan_ctl
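The osd<<count>> and cmp<<count>> entries are patterns, not hostnames: the model generator expands them using openstack_compute_count and compute_padding_with_zeros from the cookiecutter context. Roughly (an illustration of the naming only, not the generator's actual code):

    # With openstack_compute_count='2' and compute_padding_with_zeros='True':
    for i in $(seq 1 2); do printf 'cmp%03d\n' "$i"; done   # -> cmp001, cmp002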
diff --git a/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/salt-context-cookiecutter-contrail.yaml
deleted file mode 100644
index d01be4f..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/salt-context-cookiecutter-contrail.yaml
+++ /dev/null
@@ -1,445 +0,0 @@
-default_context:
-  backup_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
-    +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
-    qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
-    m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
-    7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
-    2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
-    HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
-    AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
-    o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
-    5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
-    XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
-    AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
-    USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
-    uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
-    QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
-    98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
-    r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
-    qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
-    CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
-    p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
-    79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
-    NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
-    CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
-    XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
-    N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
-    -----END RSA PRIVATE KEY-----
-  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
-  bmk_enabled: 'False'
-  cicd_control_node01_address: 10.167.8.91
-  cicd_control_node01_hostname: cid01
-  cicd_control_node02_address: 10.167.8.92
-  cicd_control_node02_hostname: cid02
-  cicd_control_node03_address: 10.167.8.93
-  cicd_control_node03_hostname: cid03
-  cicd_control_vip_address: 10.167.8.90
-  cicd_control_vip_hostname: cid
-  cicd_enabled: 'True'
-  cicd_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
-    oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
-    IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
-    kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
-    wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
-    27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
-    5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
-    lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
-    k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
-    3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
-    dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
-    0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
-    qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
-    BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
-    UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
-    VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
-    1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
-    nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
-    Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
-    FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
-    HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
-    Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
-    poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
-    17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
-    l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
-    -----END RSA PRIVATE KEY-----
-  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
-  cluster_domain: cookied-cicd-bm-queens-contrail-maas.local
-  cluster_name: cookied-cicd-bm-queens-contrail-maas
-  opencontrail_version: 4.1
-  linux_repo_contrail_component: oc41
-  compute_bond_mode: active-backup
-  compute_padding_with_zeros: 'True'
-  compute_primary_first_nic: eth1
-  compute_primary_second_nic: eth2
-  context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
-  control_network_netmask: 255.255.255.0
-  control_network_subnet: 10.167.8.0/24
-  control_vlan: '2422'
-  tenant_vlan: '2423'
-  backend_vlan: '2424'
-  storage_vlan: '2425'  # not implemented yet, placeholder
-  cookiecutter_template_branch: ''
-  cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
-  deploy_network_gateway: 172.16.49.65
-  deploy_network_netmask: 255.255.255.192
-  deploy_network_subnet: 172.16.49.64/26
-  deployment_type: physical
-  dns_server01: 172.18.208.44
-  dns_server02: 172.18.176.6
-  email_address: sgudz@mirantis.com
-  infra_bond_mode: active-backup
-  infra_deploy_nic: eth0
-  infra_kvm01_control_address: 10.167.8.241
-  infra_kvm01_deploy_address: 172.16.49.67
-  infra_kvm01_hostname: kvm01
-  infra_kvm02_control_address: 10.167.8.242
-  infra_kvm02_deploy_address: 172.16.49.68
-  infra_kvm02_hostname: kvm02
-  infra_kvm03_control_address: 10.167.8.243
-  infra_kvm03_deploy_address: 172.16.49.69
-  infra_kvm03_hostname: kvm03
-  infra_kvm_vip_address: 10.167.8.240
-  infra_primary_first_nic: eth1
-  infra_primary_second_nic: eth2
-  internal_proxy_enabled: 'False'
-  kqueen_custom_mail_enabled: 'False'
-  kqueen_enabled: 'False'
-  kubernetes_enabled: 'False'
-  local_repositories: 'False'
-  maas_enabled: 'True'
-  maas_deploy_address: 172.16.49.66
-  maas_deploy_cidr: 172.16.49.64/26
-  maas_deploy_gateway: 172.16.49.65
-  maas_deploy_range_end: 172.16.49.119
-  maas_deploy_range_start: 172.16.49.77
-  maas_deploy_vlan: '0'
-  maas_dhcp_enabled: 'True'
-  maas_fabric_name: fabric-0
-  maas_hostname: cfg01
-  maas_manage_deploy_network: 'True'
-  maas_machines: |
-        kvm01: # cz7341-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          # pxe_interface_mac:
-          pxe_interface_mac: "0c:c4:7a:6c:83:56"
-          interfaces:
-            enp9s0f0:
-              mac: "0c:c4:7a:6c:83:56"
-              mode: "static"
-              ip: "172.16.49.67"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.225.117"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        kvm02: # #cz7342-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:6c:84:2c"
-          interfaces:
-            enp9s0f0:
-              mac: "0c:c4:7a:6c:84:2c"
-              mode: "static"
-              ip: "172.16.49.68"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.225.118"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        kvm03: # #cz7343-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:6c:83:54"
-          interfaces:
-            enp9s0f0:
-              mac: "0c:c4:7a:6c:83:54"
-              mode: "static"
-              ip: "172.16.49.69"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "5.43.225.119"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        osd001: # #cz7343-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:55:6a:d4"
-          interfaces:
-            enp2s0f0:
-              mac: "0c:c4:7a:55:6a:d4"
-              mode: "static"
-              ip: "172.16.49.70"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "185.8.59.243"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        osd002: # #cz7343-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:55:6a:56"
-          interfaces:
-            enp2s0f0:
-              mac: "0c:c4:7a:55:6a:56"
-              mode: "static"
-              ip: "172.16.49.71"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "185.8.59.244"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        osd003: # #cz7343-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:55:6a:2a"
-          interfaces:
-            enp2s0f0:
-              mac: "0c:c4:7a:55:6a:2a"
-              mode: "static"
-              ip: "172.16.49.72"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "185.8.59.245"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        cmp001: # #cz7345-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:54:a2:5f"
-          interfaces:
-            enp2s0f1:
-              mac: "0c:c4:7a:54:a2:5f"
-              mode: "static"
-              ip: "172.16.49.73"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "185.8.59.233"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        cmp002: # cz7346-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:54:a0:51"
-          interfaces:
-            enp2s0f1:
-              mac: "0c:c4:7a:54:a0:51"
-              mode: "static"
-              ip: "172.16.49.74"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-          power_parameters:
-            power_address: "185.8.59.232"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-
-  mcp_common_scripts_branch: ''
-  mcp_version: proposed
-  offline_deployment: 'False'
-  opencontrail_analytics_address: 10.167.8.30
-  opencontrail_analytics_hostname: nal
-  opencontrail_analytics_node01_address: 10.167.8.31
-  opencontrail_analytics_node01_hostname: nal01
-  opencontrail_analytics_node02_address: 10.167.8.32
-  opencontrail_analytics_node02_hostname: nal02
-  opencontrail_analytics_node03_address: 10.167.8.33
-  opencontrail_analytics_node03_hostname: nal03
-  opencontrail_compute_iface_mask: '24'
-  opencontrail_control_address: 10.167.8.20
-  opencontrail_control_hostname: ntw
-  opencontrail_control_node01_address: 10.167.8.21
-  opencontrail_control_node01_hostname: ntw01
-  opencontrail_control_node02_address: 10.167.8.22
-  opencontrail_control_node02_hostname: ntw02
-  opencontrail_control_node03_address: 10.167.8.23
-  opencontrail_control_node03_hostname: ntw03
-  opencontrail_enabled: 'True'
-  opencontrail_router01_address: 10.167.8.220
-  opencontrail_router01_hostname: rtr01
-  opencontrail_router02_address: 10.167.8.101
-  opencontrail_router02_hostname: rtr02
-  openldap_enabled: 'False'
-  openssh_groups: ''
-  openstack_benchmark_node01_address: 10.167.8.95
-  openstack_benchmark_node01_hostname: bmk01
-  openstack_cluster_size: compact
-  openstack_compute_count: '2'
-  openstack_compute_rack01_hostname: cmp
-  openstack_compute_single_address_ranges: 10.167.8.101-10.167.8.102
-  openstack_compute_deploy_address_ranges: 172.16.49.73-172.16.49.74
-  openstack_compute_tenant_address_ranges: 10.167.10.101-10.167.10.102
-  openstack_compute_backend_address_ranges: 10.167.10.101-10.167.10.102
-  openstack_control_address: 10.167.8.10
-  openstack_control_hostname: ctl
-  openstack_control_node01_address: 10.167.8.11
-  openstack_control_node01_hostname: ctl01
-  openstack_control_node02_address: 10.167.8.12
-  openstack_control_node02_hostname: ctl02
-  openstack_control_node03_address: 10.167.8.13
-  openstack_control_node03_hostname: ctl03
-  openstack_database_address: 10.167.8.50
-  openstack_database_hostname: dbs
-  openstack_database_node01_address: 10.167.8.51
-  openstack_database_node01_hostname: dbs01
-  openstack_database_node02_address: 10.167.8.52
-  openstack_database_node02_hostname: dbs02
-  openstack_database_node03_address: 10.167.8.53
-  openstack_database_node03_hostname: dbs03
-  openstack_enabled: 'True'
-  openstack_message_queue_address: 10.167.8.40
-  openstack_message_queue_hostname: msg
-  openstack_message_queue_node01_address: 10.167.8.41
-  openstack_message_queue_node01_hostname: msg01
-  openstack_message_queue_node02_address: 10.167.8.42
-  openstack_message_queue_node02_hostname: msg02
-  openstack_message_queue_node03_address: 10.167.8.43
-  openstack_message_queue_node03_hostname: msg03
-  openstack_network_engine: opencontrail
-  openstack_neutron_bgp_vpn: 'False'
-  openstack_neutron_bgp_vpn_driver: bagpipe
-  openstack_nfv_dpdk_enabled: 'False'
-  openstack_nfv_sriov_enabled: 'False'
-  openstack_nova_compute_nfv_req_enabled: 'False'
-  openstack_nova_compute_reserved_host_memory_mb: '900'
-  openstack_proxy_address: 10.167.8.80
-  openstack_proxy_hostname: prx
-  openstack_proxy_node01_address: 10.167.8.81
-  openstack_proxy_node01_hostname: prx01
-  openstack_proxy_node02_address: 10.167.8.82
-  openstack_proxy_node02_hostname: prx02
-  openstack_upgrade_node01_address: 10.167.8.19
-  openstack_version: queens
-  oss_enabled: 'False'
-  oss_node03_address: ${_param:stacklight_monitor_node03_address}
-  oss_webhook_app_id: '24'
-  oss_webhook_login_id: '13'
-  platform: openstack_enabled
-  public_host: ${_param:openstack_proxy_address}
-  publication_method: email
-  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
-  salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
-  salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
-  salt_master_address: 10.167.8.15
-  salt_master_hostname: cfg01
-  salt_master_management_address: 172.16.49.66
-  shared_reclass_branch: ''
-  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
-  stacklight_enabled: 'True'
-  stacklight_log_address: 10.167.8.60
-  stacklight_log_hostname: log
-  stacklight_log_node01_address: 10.167.8.61
-  stacklight_log_node01_hostname: log01
-  stacklight_log_node02_address: 10.167.8.62
-  stacklight_log_node02_hostname: log02
-  stacklight_log_node03_address: 10.167.8.63
-  stacklight_log_node03_hostname: log03
-  stacklight_long_term_storage_type: prometheus
-  stacklight_monitor_address: 10.167.8.70
-  stacklight_monitor_hostname: mon
-  stacklight_monitor_node01_address: 10.167.8.71
-  stacklight_monitor_node01_hostname: mon01
-  stacklight_monitor_node02_address: 10.167.8.72
-  stacklight_monitor_node02_hostname: mon02
-  stacklight_monitor_node03_address: 10.167.8.73
-  stacklight_monitor_node03_hostname: mon03
-  stacklight_telemetry_address: 10.167.8.85
-  stacklight_telemetry_hostname: mtr
-  stacklight_telemetry_node01_address: 10.167.8.86
-  stacklight_telemetry_node01_hostname: mtr01
-  stacklight_telemetry_node02_address: 10.167.8.87
-  stacklight_telemetry_node02_hostname: mtr02
-  stacklight_telemetry_node03_address: 10.167.8.88
-  stacklight_telemetry_node03_hostname: mtr03
-  stacklight_version: '2'
-  static_ips_on_deploy_network_enabled: 'False'
-  tenant_network_gateway: 10.167.10.253
-  tenant_network_netmask: 255.255.255.0
-  tenant_network_subnet: 10.167.10.0/24
-  upstream_proxy_enabled: 'False'
-  use_default_network_scheme: 'True'
-  openldap_domain: cookied-cicd-bm-queens-contrail-maas.local
-  openldap_enabled: 'True'
-  openldap_organisation: ${_param:cluster_name}
-  ceph_enabled: 'True'
-  ceph_version: "luminous"
-  ceph_hyper_converged: "False"
-  ceph_osd_backend: "bluestore"
-  ceph_osd_count: "3"
-  ceph_osd_node_count: 3
-  ceph_osd_block_db_size: 20
-  ceph_osd_journal_size: 20
-  ceph_osd_bond_mode: "active-backup"
-  ceph_osd_data_partition_prefix: ""
-
-  ceph_public_network_allocation: storage
-  ceph_public_network: "10.167.8.0/24"
-  ceph_cluster_network: "10.167.8.0/24"
-
-# for 2018.11.0+
-  ceph_osd_single_address_ranges: "10.167.8.200-10.167.8.202"
-  ceph_osd_deploy_address_ranges: "172.16.49.70-172.16.49.72"
-  ceph_osd_storage_address_ranges: "10.167.8.200-10.167.8.202"
-  ceph_osd_backend_address_ranges: "10.167.10.200-10.167.10.202"
-
-  ceph_osd_data_disks: "/dev/sdb"
-  ceph_osd_journal_or_block_db_disks: "/dev/sdb"
-  ceph_osd_mode: "separated"
-  ceph_osd_deploy_nic: "eth0"
-  ceph_osd_primary_first_nic: "eth1"
-  ceph_osd_primary_second_nic: "eth2"
-  #ceph_mon_node01_address: "172.16.47.66"
-  #ceph_mon_node01_deploy_address: "172.16.48.66"
-  ceph_mon_node01_address: "10.167.8.66"
-  ceph_mon_node01_hostname: "cmn01"
-  #ceph_mon_node02_address: "172.16.47.67"
-  #ceph_mon_node02_deploy_address: "172.16.48.67"
-  ceph_mon_node02_address: "10.167.8.67"
-  ceph_mon_node02_hostname: "cmn02"
-  #ceph_mon_node03_address: "172.16.47.68"
-  #ceph_mon_node03_deploy_address: "172.16.48.68"
-  ceph_mon_node03_address: "10.167.8.68"
-  ceph_mon_node03_hostname: "cmn03"
-  #ceph_rgw_address: "172.16.47.75"
-  ceph_rgw_address: "10.167.8.75"
-  #ceph_rgw_node01_address: "172.16.47.76"
-  #ceph_rgw_node01_deploy_address: "172.16.48.76"
-  ceph_rgw_node01_address: "10.167.8.76"
-  ceph_rgw_node01_hostname: "rgw01"
-  #ceph_rgw_node02_address: "172.16.47.77"
-  #ceph_rgw_node02_deploy_address: "172.16.48.77"
-  ceph_rgw_node02_address: "10.167.8.77"
-  ceph_rgw_node02_hostname: "rgw02"
-  #ceph_rgw_node03_address: "172.16.47.78"
-  #ceph_rgw_node03_deploy_address: "172.16.48.78"
-  ceph_rgw_node03_address: "10.167.8.78"
-  ceph_rgw_node03_hostname: "rgw03"
-  manila_enabled: 'False'
-  barbican_enabled: 'False'
-  barbican_integration_enabled: 'False'
-  # SSL settings
-  nova_vnc_tls_enabled: 'True'
-  galera_ssl_enabled: 'True'
-  openstack_mysql_x509_enabled: 'True'
-  rabbitmq_ssl_enabled: 'True'
-  openstack_rabbitmq_x509_enabled: 'True'
-  openstack_internal_protocol: 'https'
\ No newline at end of file
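Aside: salt_api_password_hash above is an ordinary SHA-512 crypt string (the $6$ prefix); it can be regenerated from the plain-text salt_api_password, e.g. with OpenSSL 1.1.1+:

    openssl passwd -6 'BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH'   # emits a $6$... hash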
diff --git a/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/salt-context-environment.yaml b/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/salt-context-environment.yaml
deleted file mode 100644
index de08d36..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/salt-context-environment.yaml
+++ /dev/null
@@ -1,397 +0,0 @@
-nodes:
-    # Virtual Control Plane nodes
-    cid01.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: cicd_control_node01
-      roles:
-      - cicd_control_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cid02.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: cicd_control_node02
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cid03.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: cicd_control_node03
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    ctl01.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: openstack_control_node01
-      roles:
-      - openstack_control_leader
-      - linux_system_codename_xenial
-      classes:
-      - system.linux.system.repo.mcp.apt_mirantis.docker
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    ctl02.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: openstack_control_node02
-      roles:
-      - openstack_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    ctl03.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: openstack_control_node03
-      roles:
-      - openstack_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    dbs01.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: openstack_database_node01
-      roles:
-      - openstack_database_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    dbs02.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: openstack_database_node02
-      roles:
-      - openstack_database
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    dbs03.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: openstack_database_node03
-      roles:
-      - openstack_database
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    msg01.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: openstack_message_queue_node01
-      roles:
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    msg02.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: openstack_message_queue_node02
-      roles:
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    msg03.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: openstack_message_queue_node03
-      roles:
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    prx01.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: openstack_proxy_node01
-      roles:
-      - openstack_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    prx02.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: openstack_proxy_node02
-      roles:
-      - openstack_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mon01.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: stacklight_server_node01
-      roles:
-      - stacklightv2_server_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mon02.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: stacklight_server_node02
-      roles:
-      - stacklightv2_server
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mon03.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: stacklight_server_node03
-      roles:
-      - stacklightv2_server
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    nal01.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: opencontrail_analytics_node01
-      roles:
-      - opencontrail_analytics
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    nal02.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: opencontrail_analytics_node02
-      roles:
-      - opencontrail_analytics
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    nal03.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: opencontrail_analytics_node03
-      roles:
-      - opencontrail_analytics
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    ntw01.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: opencontrail_control_node01
-      roles:
-      - opencontrail_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    ntw02.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: opencontrail_control_node02
-      roles:
-      - opencontrail_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    ntw03.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: opencontrail_control_node03
-      roles:
-      - opencontrail_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mtr01.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: stacklight_telemetry_node01
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mtr02.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: stacklight_telemetry_node02
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mtr03.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: stacklight_telemetry_node03
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    log01.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: stacklight_log_node01
-      roles:
-      - stacklight_log_leader_v2
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    log02.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: stacklight_log_node02
-      roles:
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    log03.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: stacklight_log_node03
-      roles:
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cmn01.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: ceph_mon_node01
-      roles:
-      - ceph_mon
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cmn02.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: ceph_mon_node02
-      roles:
-      - ceph_mon
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cmn03.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: ceph_mon_node03
-      roles:
-      - ceph_mon
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    rgw01.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: ceph_rgw_node01
-      roles:
-      - ceph_rgw
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    rgw02.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: ceph_rgw_node02
-      roles:
-      - ceph_rgw
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    rgw03.cookied-cicd-bm-queens-contrail-maas.local:
-      reclass_storage_name: ceph_rgw_node03
-      roles:
-      - ceph_rgw
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-#    bmk01.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: openstack_benchmark_node01
-#      roles:
-#      - openstack_benchmark
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
diff --git a/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/salt.yaml b/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/salt.yaml
deleted file mode 100644
index 77f10d2..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/salt.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-{% from 'cookied-cicd-bm-queens-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-cicd-bm-queens-contrail-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-cicd-bm-queens-contrail-maas/underlay.yaml' import DOMAIN_NAME with context %}
-
-# See shared-salt.yaml for other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-- description: Wait for salt-master to be ready after configdrive user-data
-  cmd: |
-    timeout 120 salt-call test.ping
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: Generate a public key for machines in MAAS
-  cmd: |
-    ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
-    maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Run commissioning of BM nodes
-  cmd: |
-    salt-call maas.process_machines
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Wait for machines to become ready
-  cmd: |
-    salt-call maas.machines_status && timeout 120 salt-call state.sls maas.machines.wait_for_ready
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 7, delay: 5}
-  skip_fail: false
-
-- description: Enforce the interface configuration defined in the model for the servers
-  cmd: |
-    salt-call state.sls maas.machines.assign_ip;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Remove all the salt-minions and re-register the cfg01 minion
-  cmd: |
-    salt-key -y -D;
-    salt-call test.ping
-    sleep 5
-    # Check that the cfg01 is registered
-    salt-key | grep cfg01
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Provision the automatically commissioned physical nodes through MAAS
-  cmd: |
-    salt-call maas.deploy_machines;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-- description: Wait for machines to be deployed
-  cmd: |
-    salt-call maas.machines_status && timeout 300 salt-call state.sls maas.machines.wait_for_deployed
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 6, delay: 5}
-  skip_fail: false
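
For reference, the removed salt.yaml above drove the whole MAAS bare-metal lifecycle from cfg01. Stripped of the retries and timeouts, the flow reduces to the following calls (all module and state names taken verbatim from the file itself):

    salt-call maas.process_machines                      # commission the BM nodes
    salt-call state.sls maas.machines.wait_for_ready     # block until commissioning finishes
    salt-call state.sls maas.machines.assign_ip          # enforce the modelled interface IPs
    salt-call maas.deploy_machines                       # provision the OS onto the nodes
    salt-call state.sls maas.machines.wait_for_deployed  # block until deployment finishes

The template wrapped these calls in retry counts (and `timeout` for the wait states) because MAAS state transitions are asynchronous and can outlast a single salt-call.
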
diff --git a/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/underlay.yaml b/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/underlay.yaml
deleted file mode 100644
index 54b9f63..0000000
--- a/tcp_tests/templates/cookied-cicd-bm-queens-contrail-maas/underlay.yaml
+++ /dev/null
@@ -1,129 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-
-#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-bm-queens-contrail-maas') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
-
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
-
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'cookied-cicd-bm-queens-contrail-maas_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
-        params:
-          ip_reserved:
-            gateway: '172.16.49.65'
-            l2_network_device: +61
-            default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
-          ip_ranges:
-            dhcp: [+2, -3]
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-          ip_ranges:
-            dhcp: [+2, -3]
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-          ip_ranges:
-            dhcp: [+2, -3]
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: -2
-          ip_ranges:
-            dhcp: [+2, -3]
-
-    groups:
-
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-
-        network_pools:
-          admin: admin-pool01
-
-        l2_network_devices:
-          admin:
-            address_pool: admin-pool01
-            dhcp: false
-            forward:
-              mode: bridge
-            parent_iface:
-              phys_dev: !os_env LAB_MANAGEMENT_IFACE
-          private:
-            address_pool: private-pool01
-            dhcp: false
-            forward:
-              mode: bridge
-            parent_iface:
-              phys_dev: !os_env LAB_CONTROL_IFACE
-
-        group_volumes:
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fall back to IMAGE_PATH1604
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
-              boot:
-                - hd
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: config
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
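
A note on the address pools in the removed underlay.yaml: values such as gateway: '+1' and dhcp: [+2, -3] are fuel-devops relative offsets, resolved against the pool's subnet when the environment is created; positive offsets count from the start of the network and negative ones from its end (the exact boundary handling is fuel-devops behaviour, not spelled out in this file). For the default private pool 10.167.8.0/24 that reads roughly as:

    gateway: +1        # 10.167.8.1
    ip_ranges:
      dhcp: [+2, -3]   # 10.167.8.2 up to a few addresses short of the subnet end

Only the admin pool pins literal addresses, for its gateway and for cfg01 (ETH1_IP_ADDRESS_CFG01); everything else was derived from offsets.
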
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
deleted file mode 100644
index cb9a221..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
+++ /dev/null
@@ -1,215 +0,0 @@
-default_context:
-  auditd_enabled: 'False'
-  backup_private_key: |
-      -----BEGIN RSA PRIVATE KEY-----
-      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
-      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
-      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
-      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
-      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
-      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
-      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
-      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
-      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
-      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
-      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
-      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
-      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
-      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
-      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
-      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
-      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
-      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
-      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
-      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
-      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
-      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
-      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
-      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
-      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
-      -----END RSA PRIVATE KEY-----
-  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
-  bmk_enabled: 'False'
-  calico_cni_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/cni:latest
-  calico_enable_nat: 'True'
-  calico_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/node:latest
-  calico_netmask: '16'
-  calico_network: 192.168.0.0
-  calicoctl_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/ctl:latest
-  ceph_enabled: 'False'
-  cicd_control_node01_address: 10.167.4.91
-  cicd_control_node01_hostname: cid01
-  cicd_control_node02_address: 10.167.4.92
-  cicd_control_node02_hostname: cid02
-  cicd_control_node03_address: 10.167.4.93
-  cicd_control_node03_hostname: cid03
-  cicd_control_vip_address: 10.167.4.90
-  cicd_control_vip_hostname: cid
-  cicd_enabled: 'True'
-  cicd_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
-    3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
-    AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
-    xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
-    B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
-    q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
-    s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
-    V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
-    9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
-    pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
-    MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
-    7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
-    udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
-    R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
-    XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
-    Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
-    KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
-    6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
-    ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
-    ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
-    Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
-    r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
-    mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
-    qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
-    9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
-    -----END RSA PRIVATE KEY-----
-  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
-  cluster_domain: cookied-cicd-k8s-calico-sl.local
-  cluster_name: cookied-cicd-k8s-calico-sl
-  context_seed: T3sbEdCaBfxrg9ysyA6LIaift250Ktb389rpcISKbdqPi5j0WHKiKAhBftYueBKl
-  control_network_netmask: 255.255.255.0
-  control_network_subnet: 10.167.4.0/24
-  control_vlan: '10'
-  cookiecutter_template_branch: ''
-  jenkins_pipelines_branch: 'release/2019.2.0'
-  cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
-  deploy_network_gateway: 10.167.5.1
-  deploy_network_netmask: 255.255.255.0
-  deploy_network_subnet: 10.167.5.0/24
-  deployment_type: physical
-  dns_server01: 172.18.176.6
-  dns_server02: 172.18.208.44
-  email_address: ddmitriev@mirantis.com
-  etcd_ssl: 'True'
-  infra_bond_mode: active-backup
-  infra_deploy_nic: eth0
-  infra_kvm01_control_address: 10.167.4.241
-  infra_kvm01_deploy_address: 10.167.5.91
-  infra_kvm01_hostname: kvm01
-  infra_kvm02_control_address: 10.167.4.242
-  infra_kvm02_deploy_address: 10.167.5.92
-  infra_kvm02_hostname: kvm02
-  infra_kvm03_control_address: 10.167.4.243
-  infra_kvm03_deploy_address: 10.167.5.93
-  infra_kvm03_hostname: kvm03
-  infra_kvm_vip_address: 10.167.4.240
-  infra_primary_first_nic: eth1
-  infra_primary_second_nic: eth2
-  internal_proxy_enabled: 'False'
-  kqueen_custom_mail_enabled: 'False'
-  kqueen_enabled: 'False'
-  kubernetes_control_address: 10.167.4.10
-  kubernetes_control_node01_address: 10.167.4.11
-  kubernetes_control_node01_deploy_address: 10.167.5.11
-  kubernetes_control_node01_hostname: ctl01
-  kubernetes_control_node02_address: 10.167.4.12
-  kubernetes_control_node02_deploy_address: 10.167.5.12
-  kubernetes_control_node02_hostname: ctl02
-  kubernetes_control_node03_address: 10.167.4.13
-  kubernetes_control_node03_deploy_address: 10.167.5.13
-  kubernetes_control_node03_hostname: ctl03
-  kubernetes_compute_count: 4
-  kubernetes_compute_rack01_hostname: cmp
-  kubernetes_compute_deploy_address_ranges: 10.167.5.101-10.167.5.104
-  kubernetes_compute_single_address_ranges: 10.167.4.101-10.167.4.104
-  kubernetes_compute_tenant_address_ranges: 10.167.6.101-10.167.6.104
-  kubernetes_enabled: 'True'
-  kubernetes_externaldns_enabled: 'False'
-  kubernetes_keepalived_vip_interface: br_ctl
-  kubernetes_network_calico_enabled: 'True'
-  kubernetes_virtlet_enabled: 'True'
-  kubernetes_proxy_hostname: prx
-  kubernetes_proxy_node01_hostname: prx01
-  kubernetes_proxy_node02_hostname: prx02
-  kubernetes_proxy_address: 10.167.4.220
-  kubernetes_proxy_node01_address: 10.167.4.221
-  kubernetes_proxy_node02_address: 10.167.4.222
-  kubernetes_metallb_enabled: 'True'
-  metallb_addresses: 172.17.16.150-172.17.16.190
-  kubernetes_ingressnginx_enabled: 'True'
-  kubernetes_ingressnginx_controller_replicas: 2
-  local_repositories: 'False'
-  maas_deploy_address: 10.167.5.15
-  maas_deploy_range_end: 10.167.5.199
-  maas_deploy_range_start: 10.167.5.180
-  maas_deploy_vlan: '0'
-  maas_fabric_name: deploy-fabric0
-  maas_hostname: cfg01
-  mcp_common_scripts_branch: ''
-  mcp_version: proposed
-  offline_deployment: 'False'
-  opencontrail_enabled: 'False'
-  openldap_domain: ${_param:cluster_name}.local
-  openldap_enabled: 'True'
-  openldap_organisation: ${_param:cluster_name}
-  openssh_groups: cicd
-  openstack_enabled: 'False'
-  oss_enabled: 'False'
-  oss_node03_address: ${_param:stacklight_monitor_node03_address}
-  oss_webhook_app_id: '24'
-  oss_pushkin_email_sender_password: password
-  oss_pushkin_smtp_port: '587'
-  oss_webhook_login_id: '13'
-  platform: kubernetes_enabled
-  public_host: ${_param:infra_config_address}
-  publication_method: email
-  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
-  salt_api_password: LTlVnap35hqpRVbB5QjA27EuKh9Ttl3k
-  salt_api_password_hash: $6$RKagUPuQ$Javpjz7b.hqKOOr1rai7uGQd/FnqlOH59tXn12/0G.LkVyunYmgBkSC5zTjoqZvIS1fOOOqsmCb9Q4HcGUbXS.
-  salt_master_address: 10.167.4.15
-  salt_master_hostname: cfg01
-  salt_master_management_address: 10.167.5.15
-  shared_reclass_branch: 'proposed'
-  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
-  stacklight_enabled: 'True'
-  stacklight_log_address: 10.167.4.60
-  stacklight_log_hostname: log
-  stacklight_log_node01_address: 10.167.4.61
-  stacklight_log_node01_hostname: log01
-  stacklight_log_node02_address: 10.167.4.62
-  stacklight_log_node02_hostname: log02
-  stacklight_log_node03_address: 10.167.4.63
-  stacklight_log_node03_hostname: log03
-  stacklight_long_term_storage_type: prometheus
-  stacklight_monitor_address: 10.167.4.70
-  stacklight_monitor_hostname: mon
-  stacklight_monitor_node01_address: 10.167.4.71
-  stacklight_monitor_node01_hostname: mon01
-  stacklight_monitor_node02_address: 10.167.4.72
-  stacklight_monitor_node02_hostname: mon02
-  stacklight_monitor_node03_address: 10.167.4.73
-  stacklight_monitor_node03_hostname: mon03
-  stacklight_telemetry_address: 10.167.4.85
-  stacklight_telemetry_hostname: mtr
-  stacklight_telemetry_node01_address: 10.167.4.86
-  stacklight_telemetry_node01_hostname: mtr01
-  stacklight_telemetry_node02_address: 10.167.4.87
-  stacklight_telemetry_node02_hostname: mtr02
-  stacklight_telemetry_node03_address: 10.167.4.88
-  stacklight_telemetry_node03_hostname: mtr03
-  stacklight_version: '2'
-  static_ips_on_deploy_network_enabled: 'False'
-  tenant_network_gateway: 10.167.6.1
-  tenant_network_netmask: 255.255.255.0
-  tenant_network_subnet: 10.167.6.0/24
-  tenant_vlan: '20'
-  upstream_proxy_enabled: 'False'
-  use_default_network_scheme: 'False'
-  vnf_onboarding_enabled: 'False'
-  secrets_encryption_enabled: 'True'
-  secrets_encryption_key_id: 'F5CB2ADC36159B03'
-  # Used on CI only.
-  secrets_encryption_private_key: ''
-  kubernetes_helm_enabled: 'True'
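
One detail of the removed context worth recording: salt_api_password_hash is a SHA-512 crypt of salt_api_password (the $6$ prefix selects the scheme and RKagUPuQ is the salt). If a regenerated context ever needs a matching pair, a hash in the same format can be produced, for example, with OpenSSL 1.1.1+:

    openssl passwd -6 -salt RKagUPuQ 'LTlVnap35hqpRVbB5QjA27EuKh9Ttl3k'

which prints a $6$RKagUPuQ$... string that can be compared against the value above.
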
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/encryption-key.asc b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/encryption-key.asc
deleted file mode 100644
index 381eb77..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/encryption-key.asc
+++ /dev/null
@@ -1,56 +0,0 @@
------BEGIN PGP PRIVATE KEY BLOCK-----
-
-lQcYBFyBRcUBEACmP/muUIwbEg6Z7dA3c9I2NadcjDHXKg/ViXwaOB4KSd9/FC0o
-KSBPccWb+1sm+zdUy2f/LC5r8RvU7yZd4Mbzz8R1DQncXg4nG7bppW7oAcHpc0jk
-pV/SvdMYxuXsrbKbpoGEquwVkbb4oTv2MLSBfGfFzBeQfiwGEWm1xPLSeXc4biLC
-FatCU7w4LS1U4BEOqRCp6lW/hQFLoX+j6rNT8TwC5AeFpKgUWuQZGOO4fZKpbvo2
-sCvF5VA1HSVXlZtzum6pL1yzLL/SoyLrYOy1KrZQmSBHG9htCZQVmvYK7U5WtWE4
-Ws5IAj+HwvgKyzXE2Srsirj1NqauQRsk+1riQk3rpDrX2BeXNUSoHR5M/RDY0gCc
-8P6heanQRnyFtjUSoovkQsydY77+QVxe0MCs+lZlg31fL+wJVG7FIbIKKwR5sj8i
-/JqhWE+t2ZzIrQ/7o7fRk7hv/u69Vb/t/Nt7fkbn53zoubqi3kNgXf6hwhTUtfW/
-lE9cc4JTzis4i/RnILUDnAwos1c0Z+tGCUo4pbiP71VfU8L259g+clPFXOIkqA9t
-L9JSZQfhH/lRj3Abs57OvZjN7/D1h8PWB+8nTB8bkoUt45SubgQb0Y9maWUcwjxw
-AcJSIk6mq8vVdBu7zOuslDjMnoUZbtJwcSwQQOnb9UUppjs3CjbcH80ttQARAQAB
-AA/9ExdprtDlJf6u2pJqxNNyInOK4p/e4VydMOJ28/PZz0iod8lzXhdK9JSWItF8
-qD9VHVG2gaErO44Wqh9EgqdbcYg8gUycA0hxy5/tI2uyDsaU5CAvEMLE/Eh8Q24j
-3UgdKK64VOnj7p4rKuYpIp55PB1zNU24rwkuOQwq3Yreb7kvLbXIHA2s+xLunGzj
-tcl9a/eSSFD2w+WcPnkvVT2QlmUvhQ12p6w++QdvBkrLa9ZPz1FFPp6AiFtLGK5e
-KW6uyV1xc9BSjujmpmPBkNIynKNpCFxtTn0uH2doMAr5kkuqIV726SfUZISNkyOa
-pHKtnAtsWHmdv9skzQIBAgAzcXTBGbdDxRj6QR+ohqbsCzfu3z9QHSbXUmxezti9
-bQqpsU1SIg8z2oDARFR6KlRzhnfpPvan+Gp9TvYsvxrXe61HpxRMdLj6Gt2Ibruf
-YHCtr1S9J5CzTTOurlIKpACUYIqgVXfgIkQzqiYX8e56PiDTUB++OqEg66i0orXB
-nbHAD2vu16CNvcaNqsak3DWkHMwmEfsuxqyUXNte0eYu9SCHtnNoYT/D7A72gK4b
-Gqg80J8ZCpo1ilIX3xUq8WsH+CoXs0X7hy6Cbi22AqnHFRYmrgoIWmRzJonp393b
-yqmTV+QsKQRpmwdX4hiH78zJLnLEUQMn8CuHAGwaJCzk4okIAMKNrIQZhkdbCCe4
-IrLuMKn4aQj3c22SMXNmu78/0cP9Rtsm3ChjzzelLO7NjvPm0nIvEcThFSIZIXCv
-iWGZCXFCKn3WtA5xWuMFNXsEQcc3AG/qRODdDSeFpo+VH/9IwppAc3zI2jxe1PRD
-G2DnheLaLIKgHunsCYxpftJDod/vRqRHeU7ulMVJfEKVxdzrCbKGiIOXSyS6KowQ
-JOxF/80ocq/25Zc/oH25Y2r/0y+xzDpOHBgU0ndrCZf2z8oOuECJTxcq83UDyJzT
-HrG/hTrU83YsQMZ0AwBrYxpzUfdH7b6y60VE19FrwmMDK6Fz8I/x4Ai0sNkI3QLR
-NntY9fsIANrB3QM8CtsdxXsFvdTEwNLsG8LMdn3loCH6Cq3ejkEKa69Uua+sB6ND
-wYOXWzyksLZJyfxIXux/hMlK/kO3ohGcEFiMUaDZndJy8IKUlDrhwcUZqm7dXMDU
-CIf0T3rOEzOXbNu3UTds3j/ruSvA5KmjzOa4Qnb41CyL5Fh7x0R8Rux3NzAn6Ecx
-Y+nAWRtI/Yz7zdL8zuHaJfbVuxAPJ+ImcXAS7cX6T9dM3tWRlam1+0Ezhdb4F8i5
-lcY7sMu95scDwhV7qOmln6wtGSkBPZgE0+TqRuELZrPvlcIRRIM42UwPWhYO2PG8
-kKd2i5teweDnhzN8+E87VV2BQhP9DA8H/0+ZiXsvaG60JGqNmWzVbB6U1qgwrFOR
-VcuzIWpdZyQR8Ok63GXuA0odoqReolba9R6fVlXchj6INBz2WY2F0twwCRPx7tRg
-Pyq4PaTA8ZYYjAVWVCd9k97gY2i80p4MPzQCnE8g4n6OWGY47pcTwSkm4HBoGoam
-igIRn3Soz7CXGF+PvSGi1T0jpwM5IWfM3IwEUPdPTIJuA2iD/9zSKDvhsP+trJ1Y
-TMe9CW3Llf5mFbHLRZ7LfMOLIngKOIxBAxHiT8wUrIRaH78wHdz8ALDsC+LNP6rK
-hKb8h/VHXaqmf0BlNjGpO7XZXfxXWJ0oTUG5Z+jKz2Ir14HYLZI1GlOA8bQlZXhh
-bXBsZS5jb20gPHNhbHQtbWFzdGVyQGV4YW1wbGUuY29tPokCTgQTAQgAOBYhBLaR
-Vrvqyq56MiGjUvXLKtw2FZsDBQJcgUXFAhsvBQsJCAcCBhUKCQgLAgQWAgMBAh4B
-AheAAAoJEPXLKtw2FZsDpi4P/1kmvlpkbOhrL73zAPyMzYa4Yo2Pi/BoMbyEKNKO
-K3wLCdP6xLGecVIt8pANosksDSGlWAnWj36/jfgt/aZisx1u6MTYaOEHkXahxOX4
-ghDW1cTbdtz7Uy5Ah9O3WNI+ejmOpCtuc3P/XOkdttKZLuCNCs6ocgCsejpNHcFK
-vMhOhnRKV8kcBrG2QLyfSyafBtM/zV+NR4Wrng71Za8fiXHlDanmrAIyuSnD538r
-hTwSFe0C9HntwuF6W+UShN7c+jPJaKQjKbZy9fuFp33NcTSPCB5dH9yrhQvOeFQo
-dFzEabMDFVGPfUVWR+TH39dWYOsq5zFmgQAbOB/vHdmEtrYNrxX0AiCZZHQHTUb9
-oBK68V8eVeFdoRLcMORBZ2RCqkQTOQoAF7o772knltjtsymnI0XNvVC/XCnZv89Q
-/eoivrd/rMMpTFOGcys6EAnSUWx0ZG/JCkezQqnx9U219BvqKNOZ60aOeOYHKpsX
-Ha8Nr72YRmtm0UMsDjEUyLOj+o06XnN7uafMv2bZpjWh2hfOrkAbxe41z6t+78ho
-P+C5vSvp01OmAt71iq+62MXVcLVKEWDpiuZSj8m83RlY5AGIaPaGX9LKPcHdGxKw
-QSczgB/jI3G08vWaq82he6UJuYexbYe1iJXfvcx8kThwZ1nXQJm+7UsISUsh8/NZ
-x0n/
-=uxDD
------END PGP PRIVATE KEY BLOCK-----
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
deleted file mode 100644
index 4abe271..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
+++ /dev/null
@@ -1,258 +0,0 @@
-nodes:
-    cfg01:
-      reclass_storage_name: infra_config_node01
-      roles:
-      - infra_config
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_static_ctl
-        ens5:
-          role: single_storage_dhcp
-
-    kvm01:
-      reclass_storage_name: infra_kvm_node01
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    kvm02:
-      reclass_storage_name: infra_kvm_node02
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    kvm03:
-      reclass_storage_name: infra_kvm_node03
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cid01:
-      reclass_storage_name: cicd_control_node01
-      roles:
-      - cicd_control_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cid02:
-      reclass_storage_name: cicd_control_node02
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cid03:
-      reclass_storage_name: cicd_control_node03
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    ctl01:
-      reclass_storage_name: kubernetes_control_node01
-      roles:
-      - kubernetes_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-        ens5:
-          role: single_storage_dhcp
-
-    ctl02:
-      reclass_storage_name: kubernetes_control_node02
-      roles:
-      - kubernetes_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-        ens5:
-          role: single_storage_dhcp
-
-    ctl03:
-      reclass_storage_name: kubernetes_control_node03
-      roles:
-      - kubernetes_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-        ens5:
-          role: single_storage_dhcp
-
-    prx01:
-      reclass_storage_name: kubernetes_proxy_node01
-      roles:
-      - kubernetes_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: single_storage_dhcp
-
-    prx02:
-      reclass_storage_name: kubernetes_proxy_node02
-      roles:
-      - kubernetes_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: single_storage_dhcp
-
-    # Generator-based computes. For compatibility only
-    cmp<<count>>:
-      reclass_storage_name: kubernetes_compute_rack01
-      roles:
-      - kubernetes_compute
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-        ens5:
-          role: single_storage_dhcp
-
-    mon01:
-      reclass_storage_name: stacklight_server_node01
-      roles:
-      - stacklightv2_server_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mon02:
-      reclass_storage_name: stacklight_server_node02
-      roles:
-      - stacklightv2_server
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mon03:
-      reclass_storage_name: stacklight_server_node03
-      roles:
-      - stacklightv2_server
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mtr01:
-      reclass_storage_name: stacklight_telemetry_node01
-      roles:
-      - stacklight_telemetry_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mtr02:
-      reclass_storage_name: stacklight_telemetry_node02
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mtr03:
-      reclass_storage_name: stacklight_telemetry_node03
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    log01:
-      reclass_storage_name: stacklight_log_node01
-      roles:
-      - stacklight_log_leader_v2
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    log02:
-      reclass_storage_name: stacklight_log_node02
-      roles:
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    log03:
-      reclass_storage_name: stacklight_log_node03
-      roles:
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml
deleted file mode 100644
index a7fefa8..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-{% from 'cookied-cicd-k8s-calico-sl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-cicd-k8s-calico-sl/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-cicd-k8s-calico-sl/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: "Share custom key from cfg to give each node acces with key from cfg01"
-  cmd: |
-    set -e;
-    set -x;
-    key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
-    salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
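
The step above derives cfg01's public key from its private key (ssh-keygen -y) and appends it to every minion's authorized_keys unconditionally, so re-running it accumulates duplicate entries. A duplicate-safe sketch of the same idea (a variant, not what the template did):

    key=$(ssh-keygen -y -f /root/.ssh/id_rsa)
    salt '*' cmd.run "grep -qxF '${key}' /root/.ssh/authorized_keys || echo '${key}' >> /root/.ssh/authorized_keys"
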
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604-swp.yaml
deleted file mode 100644
index 81936a4..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604-swp.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - if lvs vg0; then pvresize /dev/vda3; fi
-   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 16G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /usr/share/growlvm/image-layout.yml
-     content: |
-       root:
-         size: '30%VG'
-       home:
-         size: '1G'
-       var_log:
-         size: '11%VG'
-       var_log_audit:
-         size: '5G'
-       var_tmp:
-         size: '11%VG'
-       tmp:
-         size: '5G'
-     owner: root:root
-
-  growpart:
-    mode: auto
-    devices:
-      - '/'
-      - '/dev/vda3'
-    ignore_growroot_disabled: false
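
The swapfile in the removed user-data above is created imperatively in runcmd (fallocate/mkswap/swapon plus an fstab entry). cloud-init can express the same thing declaratively through its mounts module; a minimal sketch, assuming a cloud-init version with swap support:

    swap:
      filename: /swapfile
      size: auto            # or an explicit size in bytes
      maxsize: 17179869184  # cap at 16 GiB

The runcmd variant presumably stayed for explicit control over the commands and the fstab line; note that the sibling underlay--user-data1604.yaml below carries the same swap commands commented out.
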
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604.yaml
deleted file mode 100644
index 6451e34..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - if lvs vg0; then pvresize /dev/vda3; fi
-   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   #- fallocate -l 16G /swapfile
-   #- chmod 600 /swapfile
-   #- mkswap /swapfile
-   #- swapon /swapfile
-   #- echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /usr/share/growlvm/image-layout.yml
-     content: |
-       root:
-         size: '30%VG'
-       home:
-         size: '1G'
-       var_log:
-         size: '11%VG'
-       var_log_audit:
-         size: '5G'
-       var_tmp:
-         size: '11%VG'
-       tmp:
-         size: '5G'
-     owner: root:root
-
-  growpart:
-    mode: auto
-    devices:
-      - '/'
-      - '/dev/vda3'
-    ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
deleted file mode 100644
index 81d4cb8..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
+++ /dev/null
@@ -1,877 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-k8s-calico-sl') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
-{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01') %}
-{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02') %}
-{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03') %}
-
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01') %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02') %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03') %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001') %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002') %}
-{% set HOSTNAME_CMP03 = os_env('HOSTNAME_CMP03', 'cmp003') %}
-{% set HOSTNAME_CMP04 = os_env('HOSTNAME_CMP04', 'cmp004') %}
-{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01') %}
-{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02') %}
-{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03') %}
-{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01') %}
-{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02') %}
-{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03') %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01') %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02') %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03') %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01') %}
-{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02') %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01') %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02') %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03') %}
-
-{% import 'cookied-cicd-k8s-calico-sl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-cicd-k8s-calico-sl/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'cookied-cicd-k8s-calico-sl/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-
-            default_{{ HOSTNAME_CID }}: +90
-            default_{{ HOSTNAME_CID01 }}: +91
-            default_{{ HOSTNAME_CID02 }}: +92
-            default_{{ HOSTNAME_CID03 }}: +93
-            default_{{ HOSTNAME_PRX01 }}: +221
-            default_{{ HOSTNAME_PRX02 }}: +222
-
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-
-            default_{{ HOSTNAME_CID }}: +90
-            default_{{ HOSTNAME_CID01 }}: +91
-            default_{{ HOSTNAME_CID02 }}: +92
-            default_{{ HOSTNAME_CID03 }}: +93
-
-            default_{{ HOSTNAME_CTL }}: +10
-            default_{{ HOSTNAME_CTL01 }}: +11
-            default_{{ HOSTNAME_CTL02 }}: +12
-            default_{{ HOSTNAME_CTL03 }}: +13
-            default_{{ HOSTNAME_CMP01 }}: +101
-            default_{{ HOSTNAME_CMP02 }}: +102
-            default_{{ HOSTNAME_CMP03 }}: +103
-            default_{{ HOSTNAME_CMP04 }}: +104
-            default_{{ HOSTNAME_LOG }}: +60
-            default_{{ HOSTNAME_LOG01 }}: +61
-            default_{{ HOSTNAME_LOG02 }}: +62
-            default_{{ HOSTNAME_LOG03 }}: +63
-            default_{{ HOSTNAME_MON }}: +70
-            default_{{ HOSTNAME_MON01 }}: +71
-            default_{{ HOSTNAME_MON02 }}: +72
-            default_{{ HOSTNAME_MON03 }}: +73
-            default_{{ HOSTNAME_MTR }}: +85
-            default_{{ HOSTNAME_MTR01 }}: +86
-            default_{{ HOSTNAME_MTR02 }}: +87
-            default_{{ HOSTNAME_MTR03 }}: +88
-            default_{{ HOSTNAME_PRX01 }}: +221
-            default_{{ HOSTNAME_PRX02 }}: +222
-            default_{{ HOSTNAME_KVM }}: +240
-            default_{{ HOSTNAME_KVM01 }}: +241
-            default_{{ HOSTNAME_KVM02 }}: +242
-            default_{{ HOSTNAME_KVM03 }}: +243
-
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-
-            default_{{ HOSTNAME_CID }}: +90
-            default_{{ HOSTNAME_CID01 }}: +91
-            default_{{ HOSTNAME_CID02 }}: +92
-            default_{{ HOSTNAME_CID03 }}: +93
-
-          ip_ranges:
-            dhcp: [+10, -10]
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-
-            default_{{ HOSTNAME_CID }}: +90
-            default_{{ HOSTNAME_CID01 }}: +91
-            default_{{ HOSTNAME_CID02 }}: +92
-            default_{{ HOSTNAME_CID03 }}: +93
-            default_{{ HOSTNAME_CTL01 }}: +11
-            default_{{ HOSTNAME_CTL02 }}: +12
-            default_{{ HOSTNAME_CTL03 }}: +13
-            default_{{ HOSTNAME_CMP01 }}: +101
-            default_{{ HOSTNAME_CMP02 }}: +102
-            default_{{ HOSTNAME_CMP03 }}: +103
-            default_{{ HOSTNAME_CMP04 }}: +104
-            default_{{ HOSTNAME_PRX01 }}: +221
-            default_{{ HOSTNAME_PRX02 }}: +222
-
-          ip_ranges:
-            dhcp: [+10, -10]
-
-
-    groups:
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: {{ os_env('CONNECTION_STRING', 'qemu:///system') }}
-            storage_pool_name: {{ os_env('STORAGE_POOL_NAME', 'default') }}
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: {{ os_env('DRIVER_USE_HOST_CPU', true) }}
-            use_hugepages: {{ os_env('DRIVER_USE_HUGEPAGES', false) }}
-
-        network_pools:
-          admin: admin-pool01
-          private: private-pool01
-          tenant: tenant-pool01
-          external: external-pool01
-
-        l2_network_devices:
-          admin:
-            address_pool: admin-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-          private:
-            address_pool: private-pool01
-            dhcp: false
-            forward:
-              mode: route
-
-          external:
-            address_pool: external-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-
-        group_volumes:
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fall back to IMAGE_PATH1604
-           format: qcow2
-         - name: mcp_ubuntu_1604_image           # Pre-configured image for VCP nodes, initially based on kvm nodes.
-           # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2  (preferred)
-           # or
-           # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           source_image: {{ os_env('MCP_IMAGE_PATH1604') }}
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: {{ os_env('CFG_NODE_CPU', 4) }}
-              memory: {{ os_env('CFG_NODE_MEMORY', 12288) }}
-              boot:
-                - hd
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: config
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
-                                                            # it will be uploaded after config drive generation
-              interfaces: &all_interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: external
-                  interface_model: *interface_model
-              network_config: &all_network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - external
-
-          - name: {{ HOSTNAME_KVM01 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
-              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: &interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config: &network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_KVM02 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
-              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_KVM03 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
-              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CID01 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
-              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CID02 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
-              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CID03 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
-              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CTL01 }}
-            role: k8s_controller
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CTL02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CTL03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CMP01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CMP02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CMP03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CMP04 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_MON01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_LOG01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_LOG02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_LOG03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MTR01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MTR02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MTR03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_PRX01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_PRX02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
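
For context on the node definitions removed above: these underlay templates lean on two reuse mechanisms. YAML anchors (&interfaces, &network_config, &all_interfaces, &all_network_config) are defined once on the first node of each shape and aliased by every later node, and os_env() lookups make each sizing default overridable from the environment. A minimal sketch of the pattern, with illustrative node names:

  - name: node01                               # illustrative name
    role: salt_minion
    params:
      vcpu: {{ os_env('KVM_NODE_CPU', 1) }}    # override by exporting KVM_NODE_CPU=4
      interfaces: &interfaces                  # anchor: defined once here...
        - label: ens3
          l2_network_device: admin
      network_config: &network_config
        ens3:
          networks:
            - admin
  - name: node02
    role: salt_minion
    params:
      vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
      interfaces: *interfaces                  # ...and aliased verbatim here
      network_config: *network_config
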
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
deleted file mode 100644
index 8c68d15..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
+++ /dev/null
@@ -1,128 +0,0 @@
-default_context:
-  auditd_enabled: 'False'
-  bmk_enabled: 'False'
-  calico_cni_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/cni:latest
-  calico_enable_nat: 'True'
-  calico_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/node:latest
-  calico_netmask: '16'
-  calico_network: 192.168.0.0
-  calicoctl_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/ctl:latest
-  ceph_enabled: 'False'
-  cicd_control_node01_address: 10.167.4.91
-  cicd_control_node01_hostname: cid01
-  cicd_control_node02_address: 10.167.4.92
-  cicd_control_node02_hostname: cid02
-  cicd_control_node03_address: 10.167.4.93
-  cicd_control_node03_hostname: cid03
-  cicd_control_vip_address: 10.167.4.90
-  cicd_control_vip_hostname: cid
-  cicd_enabled: 'True'
-  cluster_domain: cookied-cicd-k8s-genie.local
-  cluster_name: cookied-cicd-k8s-genie
-  context_seed: T3sbEdCaBfxrg9ysyA6LIaift250Ktb389rpcISKbdqPi5j0WHKiKAhBftYueBKl
-  control_network_netmask: 255.255.255.0
-  control_network_subnet: 10.167.4.0/24
-  control_vlan: '10'
-  cookiecutter_template_branch: ''
-  jenkins_pipelines_branch: 'release/2019.2.0'
-  cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
-  deploy_network_gateway: 10.167.5.1
-  deploy_network_netmask: 255.255.255.0
-  deploy_network_subnet: 10.167.5.0/24
-  deployment_type: physical
-  dns_server01: 172.18.176.6
-  dns_server02: 172.18.208.44
-  email_address: ddmitriev@mirantis.com
-  etcd_ssl: 'True'
-  infra_bond_mode: active-backup
-  infra_deploy_nic: eth0
-  infra_kvm01_control_address: 10.167.4.241
-  infra_kvm01_deploy_address: 10.167.5.91
-  infra_kvm01_hostname: kvm01
-  infra_kvm02_control_address: 10.167.4.242
-  infra_kvm02_deploy_address: 10.167.5.92
-  infra_kvm02_hostname: kvm02
-  infra_kvm03_control_address: 10.167.4.243
-  infra_kvm03_deploy_address: 10.167.5.93
-  infra_kvm03_hostname: kvm03
-  infra_kvm_vip_address: 10.167.4.240
-  infra_primary_first_nic: eth1
-  infra_primary_second_nic: eth2
-  internal_proxy_enabled: 'False'
-  kqueen_custom_mail_enabled: 'False'
-  kqueen_enabled: 'False'
-  kubernetes_control_address: 10.167.4.10
-  kubernetes_control_node01_address: 10.167.4.11
-  kubernetes_control_node01_deploy_address: 10.167.5.11
-  kubernetes_control_node01_hostname: ctl01
-  kubernetes_control_node02_address: 10.167.4.12
-  kubernetes_control_node02_deploy_address: 10.167.5.12
-  kubernetes_control_node02_hostname: ctl02
-  kubernetes_control_node03_address: 10.167.4.13
-  kubernetes_control_node03_deploy_address: 10.167.5.13
-  kubernetes_control_node03_hostname: ctl03
-  kubernetes_compute_count: 4
-  kubernetes_compute_rack01_hostname: cmp
-  kubernetes_compute_deploy_address_ranges: 10.167.5.101-10.167.5.104
-  kubernetes_compute_single_address_ranges: 10.167.4.101-10.167.4.104
-  kubernetes_compute_tenant_address_ranges: 10.167.6.101-10.167.6.104
-  kubernetes_enabled: 'True'
-  kubernetes_externaldns_enabled: 'False'
-  kubernetes_keepalived_vip_interface: br_ctl
-  kubernetes_network_calico_enabled: 'True'
-  kubernetes_proxy_hostname: prx
-  kubernetes_proxy_node01_hostname: prx01
-  kubernetes_proxy_node02_hostname: prx02
-  kubernetes_proxy_address: 10.167.4.220
-  kubernetes_proxy_node01_address: 10.167.4.221
-  kubernetes_proxy_node02_address: 10.167.4.222
-  kubernetes_metallb_enabled: 'True'
-  metallb_addresses: 172.17.16.150-172.17.16.190
-  kubernetes_ingressnginx_enabled: 'True'
-  kubernetes_ingressnginx_controller_replicas: 2
-  local_repositories: 'False'
-  maas_deploy_address: 10.167.5.15
-  maas_deploy_range_end: 10.167.5.199
-  maas_deploy_range_start: 10.167.5.180
-  maas_deploy_vlan: '0'
-  maas_fabric_name: deploy-fabric0
-  maas_hostname: cfg01
-  mcp_common_scripts_branch: ''
-  mcp_version: proposed
-  offline_deployment: 'False'
-  opencontrail_enabled: 'False'
-  openldap_domain: ${_param:cluster_name}.local
-  openldap_enabled: 'True'
-  openldap_organisation: ${_param:cluster_name}
-  openssh_groups: cicd
-  openstack_enabled: 'False'
-  oss_enabled: 'False'
-  oss_node03_address: ${_param:stacklight_monitor_node03_address}
-  platform: kubernetes_enabled
-  public_host: ${_param:infra_config_address}
-  publication_method: email
-  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
-  salt_api_password: LTlVnap35hqpRVbB5QjA27EuKh9Ttl3k
-  salt_api_password_hash: $6$RKagUPuQ$Javpjz7b.hqKOOr1rai7uGQd/FnqlOH59tXn12/0G.LkVyunYmgBkSC5zTjoqZvIS1fOOOqsmCb9Q4HcGUbXS.
-  salt_master_address: 10.167.4.15
-  salt_master_hostname: cfg01
-  salt_master_management_address: 10.167.5.15
-  shared_reclass_branch: 'proposed'
-  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
-  stacklight_enabled: 'False'
-  stacklight_version: '2'
-  static_ips_on_deploy_network_enabled: 'False'
-  tenant_network_gateway: 10.167.6.1
-  tenant_network_netmask: 255.255.255.0
-  tenant_network_subnet: 10.167.6.0/24
-  tenant_vlan: '20'
-  upstream_proxy_enabled: 'False'
-  use_default_network_scheme: 'False'
-  vnf_onboarding_enabled: 'False'
-  kubernetes_network_flannel_enabled: 'True'
-  flannel_network: 10.20.0.0/16
-  kubernetes_network_genie_enabled: 'True'
-  kubernetes_genie_default_plugin: 'calico'
-  kubernetes_virtlet_enabled: 'True'
-  kubernetes_helm_enabled: 'True'
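
The context above enables three CNI plugins at once: Calico, Flannel, and Genie, with kubernetes_genie_default_plugin: 'calico'. With CNI-Genie the per-pod plugin is selected through a pod annotation, and unannotated pods fall back to the default plugin. A sketch of how a pod would opt into Flannel on such a cluster; the annotation key is taken from the CNI-Genie docs as I recall them, and the pod name and image are illustrative:

  apiVersion: v1
  kind: Pod
  metadata:
    name: flannel-demo          # illustrative
    annotations:
      cni: "flannel"            # CNI-Genie selector; omit it to get the default plugin ('calico' here)
  spec:
    containers:
      - name: app
        image: nginx:1.15       # illustrative
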
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml
deleted file mode 100644
index 807d07f..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml
+++ /dev/null
@@ -1,159 +0,0 @@
-nodes:
-    cfg01:
-      reclass_storage_name: infra_config_node01
-      roles:
-      - infra_config
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_static_ctl
-        ens5:
-          role: single_storage_dhcp
-
-    kvm01:
-      reclass_storage_name: infra_kvm_node01
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    kvm02:
-      reclass_storage_name: infra_kvm_node02
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    kvm03:
-      reclass_storage_name: infra_kvm_node03
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cid01:
-      reclass_storage_name: cicd_control_node01
-      roles:
-      - cicd_control_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cid02:
-      reclass_storage_name: cicd_control_node02
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cid03:
-      reclass_storage_name: cicd_control_node03
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    ctl01:
-      reclass_storage_name: kubernetes_control_node01
-      roles:
-      - kubernetes_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-        ens5:
-          role: single_storage_dhcp
-
-    ctl02:
-      reclass_storage_name: kubernetes_control_node02
-      roles:
-      - kubernetes_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-        ens5:
-          role: single_storage_dhcp
-
-    ctl03:
-      reclass_storage_name: kubernetes_control_node03
-      roles:
-      - kubernetes_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-        ens5:
-          role: single_storage_dhcp
-
-    prx01:
-      reclass_storage_name: kubernetes_proxy_node01
-      roles:
-      - kubernetes_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: single_storage_dhcp
-
-    prx02:
-      reclass_storage_name: kubernetes_proxy_node02
-      roles:
-      - kubernetes_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: single_storage_dhcp
-
-    # Generator-based computes. For compatibility only
-    cmp<<count>>:
-      reclass_storage_name: kubernetes_compute_rack01
-      roles:
-      - kubernetes_compute
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-        ens5:
-          role: single_storage_dhcp
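
The cmp<<count>> entry above is a generator template rather than a literal node: with kubernetes_compute_count: 4 and the *_address_ranges from the cookiecutter context, model generation expands it into cmp001..cmp004, each taking the next address from each range. A sketch of the effective result; the exact generated key layout is an assumption:

  cmp001:
    reclass_storage_name: kubernetes_compute_rack01
    params:
      single_address: 10.167.4.101    # from kubernetes_compute_single_address_ranges
      deploy_address: 10.167.5.101    # from kubernetes_compute_deploy_address_ranges
      tenant_address: 10.167.6.101    # from kubernetes_compute_tenant_address_ranges
  # ...cmp002 and cmp003 take .102 and .103...
  cmp004:
    reclass_storage_name: kubernetes_compute_rack01
    params:
      single_address: 10.167.4.104    # count=4 exhausts the .101-.104 ranges
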
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/salt.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/salt.yaml
deleted file mode 100644
index 7294447..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/salt.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-{% from 'cookied-cicd-k8s-genie/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-cicd-k8s-genie/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-cicd-k8s-genie/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: "Share custom key from cfg to give each node acces with key from cfg01"
-  cmd: |
-    set -e;
-    set -x;
-    key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
-    salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
\ No newline at end of file
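
One note on the step above: it appends the cfg01 public key to authorized_keys unconditionally, so every re-run duplicates the key on every node. A guarded variant in the same step format would look roughly like this (a sketch, not part of the removed file; the quoting through salt cmd.run is the fragile part):

  - description: "Share custom key from cfg01, skipping nodes that already have it"
    cmd: |
      set -e;
      set -x;
      key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
      salt '*' cmd.run "grep -qF '$key' /root/.ssh/authorized_keys || echo '$key' >> /root/.ssh/authorized_keys";
    node_name: {{ HOSTNAME_CFG01 }}
    retry: {count: 1, delay: 5}
    skip_fail: true
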
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--user-data1604-swp.yaml
deleted file mode 100644
index 81936a4..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--user-data1604-swp.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - if lvs vg0; then pvresize /dev/vda3; fi
-   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 16G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /usr/share/growlvm/image-layout.yml
-     content: |
-       root:
-         size: '30%VG'
-       home:
-         size: '1G'
-       var_log:
-         size: '11%VG'
-       var_log_audit:
-         size: '5G'
-       var_tmp:
-         size: '11%VG'
-       tmp:
-         size: '5G'
-     owner: root:root
-
-  growpart:
-    mode: auto
-    devices:
-      - '/'
-      - '/dev/vda3'
-    ignore_growroot_disabled: false
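
A side note on the swap block above: fallocate-backed swap files work on ext4 but are rejected by some filesystems, in which case swapon fails; dd is the usual fallback. A sketch of equivalent runcmd entries, with an fstab guard so re-runs do not duplicate the line:

   # Same 16G swap file, created with dd instead of fallocate
   - dd if=/dev/zero of=/swapfile bs=1M count=16384
   - chmod 600 /swapfile
   - mkswap /swapfile
   - swapon /swapfile
   - grep -q '^/swapfile' /etc/fstab || echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
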
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--user-data1604.yaml
deleted file mode 100644
index 6451e34..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - if lvs vg0; then pvresize /dev/vda3; fi
-   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   #- fallocate -l 16G /swapfile
-   #- chmod 600 /swapfile
-   #- mkswap /swapfile
-   #- swapon /swapfile
-   #- echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /usr/share/growlvm/image-layout.yml
-     content: |
-       root:
-         size: '30%VG'
-       home:
-         size: '1G'
-       var_log:
-         size: '11%VG'
-       var_log_audit:
-         size: '5G'
-       var_tmp:
-         size: '11%VG'
-       tmp:
-         size: '5G'
-     owner: root:root
-
-  growpart:
-    mode: auto
-    devices:
-      - '/'
-      - '/dev/vda3'
-    ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
deleted file mode 100644
index 1e1704a..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
+++ /dev/null
@@ -1,622 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-k8s-genie') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
-{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01') %}
-{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02') %}
-{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03') %}
-
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01') %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02') %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03') %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001') %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002') %}
-{% set HOSTNAME_CMP03 = os_env('HOSTNAME_CMP03', 'cmp003') %}
-{% set HOSTNAME_CMP04 = os_env('HOSTNAME_CMP04', 'cmp004') %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01') %}
-{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02') %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01') %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02') %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03') %}
-
-{% import 'cookied-cicd-k8s-genie/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-cicd-k8s-genie/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'cookied-cicd-k8s-genie/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-
-            default_{{ HOSTNAME_CID }}: +90
-            default_{{ HOSTNAME_CID01 }}: +91
-            default_{{ HOSTNAME_CID02 }}: +92
-            default_{{ HOSTNAME_CID03 }}: +93
-            default_{{ HOSTNAME_PRX01 }}: +221
-            default_{{ HOSTNAME_PRX02 }}: +222
-
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-
-            default_{{ HOSTNAME_CID }}: +90
-            default_{{ HOSTNAME_CID01 }}: +91
-            default_{{ HOSTNAME_CID02 }}: +92
-            default_{{ HOSTNAME_CID03 }}: +93
-
-            default_{{ HOSTNAME_CTL }}: +10
-            default_{{ HOSTNAME_CTL01 }}: +11
-            default_{{ HOSTNAME_CTL02 }}: +12
-            default_{{ HOSTNAME_CTL03 }}: +13
-            default_{{ HOSTNAME_CMP01 }}: +101
-            default_{{ HOSTNAME_CMP02 }}: +102
-            default_{{ HOSTNAME_CMP03 }}: +103
-            default_{{ HOSTNAME_CMP04 }}: +104
-            default_{{ HOSTNAME_PRX01 }}: +221
-            default_{{ HOSTNAME_PRX02 }}: +222
-            default_{{ HOSTNAME_KVM }}: +240
-            default_{{ HOSTNAME_KVM01 }}: +241
-            default_{{ HOSTNAME_KVM02 }}: +242
-            default_{{ HOSTNAME_KVM03 }}: +243
-
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-
-            default_{{ HOSTNAME_CID }}: +90
-            default_{{ HOSTNAME_CID01 }}: +91
-            default_{{ HOSTNAME_CID02 }}: +92
-            default_{{ HOSTNAME_CID03 }}: +93
-
-          ip_ranges:
-            dhcp: [+10, -10]
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-
-            default_{{ HOSTNAME_CID }}: +90
-            default_{{ HOSTNAME_CID01 }}: +91
-            default_{{ HOSTNAME_CID02 }}: +92
-            default_{{ HOSTNAME_CID03 }}: +93
-            default_{{ HOSTNAME_CTL01 }}: +11
-            default_{{ HOSTNAME_CTL02 }}: +12
-            default_{{ HOSTNAME_CTL03 }}: +13
-            default_{{ HOSTNAME_CMP01 }}: +101
-            default_{{ HOSTNAME_CMP02 }}: +102
-            default_{{ HOSTNAME_CMP03 }}: +103
-            default_{{ HOSTNAME_CMP04 }}: +104
-            default_{{ HOSTNAME_PRX01 }}: +221
-            default_{{ HOSTNAME_PRX02 }}: +222
-
-          ip_ranges:
-            dhcp: [+10, -10]
-
-
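
To make the relative offsets in these pools concrete: each pool carves a /24 out of the given /16 (e.g. '10.70.0.0/16:24'), positive offsets count up from the network address, and negative ones count back from its end; this is my reading of the devops driver, so treat the exact endpoints as an assumption. Note also that HOSTNAME_CID, HOSTNAME_CTL and HOSTNAME_KVM are referenced in these pools but never set in the Jinja header above, so those keys presumably render with empty names.

  # Assuming admin-pool01 is allocated 10.70.0.0/24:
  #   gateway: +1                -> 10.70.0.1
  #   default_cfg01: +15         -> 10.70.0.15
  #   default_ctl01: +11         -> 10.70.0.11
  #   default_kvm01: +241        -> 10.70.0.241
  #   dhcp: [+90, -10]           -> roughly 10.70.0.90 .. 10.70.0.245
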
-    groups:
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: {{ os_env('CONNECTION_STRING', 'qemu:///system') }}
-            storage_pool_name: {{ os_env('STORAGE_POOL_NAME', 'default') }}
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: {{ os_env('DRIVER_USE_HOST_CPU', true) }}
-            use_hugepages: {{ os_env('DRIVER_USE_HUGEPAGES', false) }}
-
-        network_pools:
-          admin: admin-pool01
-          private: private-pool01
-          tenant: tenant-pool01
-          external: external-pool01
-
-        l2_network_devices:
-          admin:
-            address_pool: admin-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-          private:
-            address_pool: private-pool01
-            dhcp: false
-            forward:
-              mode: route
-
-          external:
-            address_pool: external-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-
-        group_volumes:
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
-           format: qcow2
-         - name: mcp_ubuntu_1604_image           # Pre-configured image for VCP nodes initially based on kvm nodes.
-           # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2  (preferred)
-           # or
-           # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           source_image: {{ os_env('MCP_IMAGE_PATH1604') }}
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
-              memory: {{ os_env('CFG_NODE_MEMORY', 12288) }}
-              boot:
-                - hd
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: config
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
-                                                            # it will be uploaded after config drive generation
-              interfaces: &all_interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: external
-                  interface_model: *interface_model
-              network_config: &all_network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - external
-
-          - name: {{ HOSTNAME_KVM01 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
-              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: &interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config: &network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_KVM02 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
-              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_KVM03 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
-              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CID01 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
-              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CID02 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
-              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CID03 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
-              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CTL01 }}
-            role: k8s_controller
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CTL02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CTL03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CMP01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CMP02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CMP03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CMP04 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_PRX01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_PRX02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
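
Note on the template mechanics: these underlay definitions mix two override styles, the Jinja-level {{ os_env('VAR', default) }} call (resolved while the template is rendered) and the !os_env YAML tag (resolved when the YAML itself is loaded). A minimal sketch of a loader for the tag, assuming PyYAML and the obvious "environment variable or literal default" semantics:

    # Sketch only: a PyYAML constructor for the '!os_env VAR, default' tag
    # used in the node definitions above. The comma-separated default and
    # the string-typed result are assumptions, not the project's loader.
    import os
    import yaml

    def os_env_constructor(loader, node):
        raw = loader.construct_scalar(node)
        var, _, default = (part.strip() for part in raw.partition(','))
        return os.environ.get(var, default)

    yaml.SafeLoader.add_constructor('!os_env', os_env_constructor)

    print(yaml.safe_load("vcpu: !os_env SLAVE_NODE_CPU, 2"))
    # {'vcpu': '2'} unless SLAVE_NODE_CPU is set in the environment
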
diff --git a/tcp_tests/templates/cookied-cicd-k8s-system/cookiecutter-context-k8s-system.yaml b/tcp_tests/templates/cookied-cicd-k8s-system/cookiecutter-context-k8s-system.yaml
deleted file mode 100644
index e97bf67..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-system/cookiecutter-context-k8s-system.yaml
+++ /dev/null
@@ -1,190 +0,0 @@
-default_context:
-  auditd_enabled: 'False'
-  backup_private_key: |
-      -----BEGIN RSA PRIVATE KEY-----
-      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
-      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
-      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
-      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
-      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
-      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
-      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
-      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
-      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
-      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
-      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
-      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
-      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
-      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
-      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
-      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
-      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
-      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
-      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
-      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
-      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
-      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
-      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
-      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
-      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
-      -----END RSA PRIVATE KEY-----
-  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
-  bmk_enabled: 'False'
-  calico_cni_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/cni:latest
-  calico_enable_nat: 'True'
-  calico_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/node:latest
-  calico_netmask: '16'
-  calico_network: 192.168.0.0
-  calicoctl_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/ctl:latest
-  ceph_enabled: 'False'
-  cicd_control_node01_address: 10.167.4.91
-  cicd_control_node01_hostname: cid01
-  cicd_control_node02_address: 10.167.4.92
-  cicd_control_node02_hostname: cid02
-  cicd_control_node03_address: 10.167.4.93
-  cicd_control_node03_hostname: cid03
-  cicd_control_vip_address: 10.167.4.90
-  cicd_control_vip_hostname: cid
-  cicd_enabled: 'True'
-  cicd_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
-    3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
-    AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
-    xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
-    B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
-    q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
-    s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
-    V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
-    9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
-    pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
-    MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
-    7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
-    udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
-    R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
-    XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
-    Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
-    KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
-    6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
-    ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
-    ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
-    Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
-    r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
-    mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
-    qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
-    9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
-    -----END RSA PRIVATE KEY-----
-  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
-  cluster_domain: cookied-cicd-k8s-system.local
-  cluster_name: cookied-cicd-k8s-system
-  context_seed: T3sbEdCaBfxrg9ysyA6LIaift250Ktb389rpcISKbdqPi5j0WHKiKAhBftYueBKl
-  control_network_netmask: 255.255.255.0
-  control_network_subnet: 10.167.4.0/24
-  control_vlan: '10'
-  cookiecutter_template_branch: ''
-  cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
-  deploy_network_gateway: 10.167.5.1
-  deploy_network_netmask: 255.255.255.0
-  deploy_network_subnet: 10.167.5.0/24
-  deployment_type: physical
-  dns_server01: 172.18.176.6
-  dns_server02: 172.18.208.44
-  email_address: vjigulin@mirantis.com
-  etcd_ssl: 'True'
-  infra_bond_mode: active-backup
-  infra_deploy_nic: eth0
-  infra_kvm01_control_address: 10.167.4.241
-  infra_kvm01_deploy_address: 10.167.5.91
-  infra_kvm01_hostname: kvm01
-  infra_kvm02_control_address: 10.167.4.242
-  infra_kvm02_deploy_address: 10.167.5.92
-  infra_kvm02_hostname: kvm02
-  infra_kvm03_control_address: 10.167.4.243
-  infra_kvm03_deploy_address: 10.167.5.93
-  infra_kvm03_hostname: kvm03
-  infra_kvm_vip_address: 10.167.4.240
-  infra_primary_first_nic: eth1
-  infra_primary_second_nic: eth2
-  internal_proxy_enabled: 'False'
-  kqueen_custom_mail_enabled: 'False'
-  kqueen_enabled: 'False'
-  kubernetes_control_address: 10.167.4.10
-  kubernetes_control_node01_address: 10.167.4.11
-  kubernetes_control_node01_deploy_address: 10.167.5.11
-  kubernetes_control_node01_hostname: ctl01
-  kubernetes_control_node02_address: 10.167.4.12
-  kubernetes_control_node02_deploy_address: 10.167.5.12
-  kubernetes_control_node02_hostname: ctl02
-  kubernetes_control_node03_address: 10.167.4.13
-  kubernetes_control_node03_deploy_address: 10.167.5.13
-  kubernetes_control_node03_hostname: ctl03
-  kubernetes_compute_count: 3
-  kubernetes_compute_rack01_hostname: cmp
-  kubernetes_compute_deploy_address_ranges: 10.167.5.101-10.167.5.104
-  kubernetes_compute_single_address_ranges: 10.167.4.101-10.167.4.104
-  kubernetes_compute_tenant_address_ranges: 10.167.6.101-10.167.6.104
-  kubernetes_enabled: 'True'
-  kubernetes_keepalived_vip_interface: br_ctl
-  kubernetes_network_calico_enabled: 'True'
-  kubernetes_proxy_hostname: prx
-  kubernetes_proxy_node01_hostname: prx01
-  kubernetes_proxy_node02_hostname: prx02
-  kubernetes_proxy_address: 10.167.4.220
-  kubernetes_proxy_node01_address: 10.167.4.221
-  kubernetes_proxy_node02_address: 10.167.4.222
-  local_repositories: 'False'
-  maas_deploy_address: 10.167.5.15
-  maas_deploy_range_end: 10.167.5.199
-  maas_deploy_range_start: 10.167.5.180
-  maas_deploy_vlan: '0'
-  maas_fabric_name: deploy-fabric0
-  maas_hostname: cfg01
-  mcp_common_scripts_branch: ''
-  mcp_version: proposed
-  offline_deployment: 'False'
-  opencontrail_enabled: 'False'
-  openldap_domain: ${_param:cluster_name}.local
-  openldap_enabled: 'True'
-  openldap_organisation: ${_param:cluster_name}
-  openssh_groups: cicd
-  openstack_enabled: 'False'
-  oss_enabled: 'False'
-  oss_node03_address: ${_param:stacklight_monitor_node03_address}
-  platform: kubernetes_enabled
-  public_host: ${_param:infra_config_address}
-  publication_method: email
-  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
-  salt_api_password: LTlVnap35hqpRVbB5QjA27EuKh9Ttl3k
-  salt_api_password_hash: $6$RKagUPuQ$Javpjz7b.hqKOOr1rai7uGQd/FnqlOH59tXn12/0G.LkVyunYmgBkSC5zTjoqZvIS1fOOOqsmCb9Q4HcGUbXS.
-  salt_master_address: 10.167.4.15
-  salt_master_hostname: cfg01
-  salt_master_management_address: 10.167.5.15
-  shared_reclass_branch: 'proposed'
-  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
-  stacklight_enabled: 'False'
-  stacklight_version: '2'
-  static_ips_on_deploy_network_enabled: 'False'
-  tenant_network_gateway: 10.167.6.1
-  tenant_network_netmask: 255.255.255.0
-  tenant_network_subnet: 10.167.6.0/24
-  tenant_vlan: '20'
-  upstream_proxy_enabled: 'False'
-  use_default_network_scheme: 'False'
-  vnf_onboarding_enabled: 'False'
-
-  kubernetes_network_genie_enabled: 'True'
-  kubernetes_genie_default_plugin: 'calico'
-
-  kubernetes_network_flannel_enabled: 'True'
-  flannel_network: 10.20.0.0/16
-
-  kubernetes_metallb_enabled: 'True'
-  metallb_addresses: 172.17.16.150-172.17.16.190
-
-  kubernetes_virtlet_enabled: 'True'
-  kubernetes_externaldns_enabled: 'True'
-  kubernetes_ingressnginx_enabled: 'True'
-  kubernetes_ingressnginx_controller_replicas: 2
-  calico_policy_enabled: 'True'
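
For reference, a default_context block like the one above is what gets fed to cookiecutter to render the cluster model. A rough sketch using the standard cookiecutter Python API (the actual tcp-tests pipeline wiring may differ):

    # Illustrative only: rendering the cookiecutter templates from the
    # context file removed above; the local file path is an assumption.
    import yaml
    from cookiecutter.main import cookiecutter

    with open('cookiecutter-context-k8s-system.yaml') as f:
        context = yaml.safe_load(f)

    cookiecutter(
        'https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git',
        no_input=True,
        extra_context=context['default_context'],
    )
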
diff --git a/tcp_tests/templates/cookied-cicd-k8s-system/environment-context-k8s-system.yaml b/tcp_tests/templates/cookied-cicd-k8s-system/environment-context-k8s-system.yaml
deleted file mode 100644
index 807d07f..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-system/environment-context-k8s-system.yaml
+++ /dev/null
@@ -1,159 +0,0 @@
-nodes:
-    cfg01:
-      reclass_storage_name: infra_config_node01
-      roles:
-      - infra_config
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_static_ctl
-        ens5:
-          role: single_storage_dhcp
-
-    kvm01:
-      reclass_storage_name: infra_kvm_node01
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    kvm02:
-      reclass_storage_name: infra_kvm_node02
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    kvm03:
-      reclass_storage_name: infra_kvm_node03
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cid01:
-      reclass_storage_name: cicd_control_node01
-      roles:
-      - cicd_control_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cid02:
-      reclass_storage_name: cicd_control_node02
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cid03:
-      reclass_storage_name: cicd_control_node03
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    ctl01:
-      reclass_storage_name: kubernetes_control_node01
-      roles:
-      - kubernetes_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-        ens5:
-          role: single_storage_dhcp
-
-    ctl02:
-      reclass_storage_name: kubernetes_control_node02
-      roles:
-      - kubernetes_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-        ens5:
-          role: single_storage_dhcp
-
-    ctl03:
-      reclass_storage_name: kubernetes_control_node03
-      roles:
-      - kubernetes_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-        ens5:
-          role: single_storage_dhcp
-
-    prx01:
-      reclass_storage_name: kubernetes_proxy_node01
-      roles:
-      - kubernetes_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: single_storage_dhcp
-
-    prx02:
-      reclass_storage_name: kubernetes_proxy_node02
-      roles:
-      - kubernetes_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: single_storage_dhcp
-
-    # Generator-based computes. For compatibility only
-    cmp<<count>>:
-      reclass_storage_name: kubernetes_compute_rack01
-      roles:
-      - kubernetes_compute
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-        ens5:
-          role: single_storage_dhcp
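
The trailing cmp<<count>> entry is a generator pattern: a single definition stands in for kubernetes_compute_count concrete compute nodes (the context sets kubernetes_compute_count: 3, even though the underlay defines cmp001..cmp004). A sketch of the expansion, assuming '<<count>>' is replaced with a zero-padded index as the cmp001-style hostnames suggest:

    # Hypothetical expansion of the 'cmp<<count>>' generator entry above.
    import copy
    import yaml

    def expand_nodes(env, compute_count):
        nodes = {}
        for name, spec in env['nodes'].items():
            if '<<count>>' in name:
                for i in range(1, compute_count + 1):
                    nodes[name.replace('<<count>>', '%03d' % i)] = copy.deepcopy(spec)
            else:
                nodes[name] = spec
        return nodes

    with open('environment-context-k8s-system.yaml') as f:
        env = yaml.safe_load(f)
    print(sorted(expand_nodes(env, 3)))
    # [..., 'cmp001', 'cmp002', 'cmp003', ...]
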
diff --git a/tcp_tests/templates/cookied-cicd-k8s-system/salt.yaml b/tcp_tests/templates/cookied-cicd-k8s-system/salt.yaml
deleted file mode 100644
index e4abaad..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-system/salt.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-{% from 'cookied-cicd-k8s-system/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-cicd-k8s-system/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-cicd-k8s-system/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for the other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
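
salt.yaml itself only pulls in the shared macros; once rendered, each macro expands into an ordered list of steps, where a step carries a description, a shell cmd, retry settings and a skip_fail flag. A toy runner under that assumption (this is not tcp-tests' actual executor):

    # Toy executor for a rendered step list; the retry/skip_fail semantics
    # are inferred from how the steps are written in these templates.
    import subprocess
    import time

    def run_steps(steps):
        for step in steps:
            retry = step.get('retry', {'count': 1, 'delay': 0})
            for _ in range(retry['count']):
                if subprocess.run(step['cmd'], shell=True).returncode == 0:
                    break
                time.sleep(retry['delay'])
            else:
                if not step.get('skip_fail', False):
                    raise RuntimeError('Step failed: ' + step['description'])
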
diff --git a/tcp_tests/templates/cookied-cicd-k8s-system/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-k8s-system/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-system/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
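
The {hostname} fields above look like plain str.format placeholders, filled in per node when the cloud-init config drive is built (assumed mechanics):

    # Sketch: filling the meta-data placeholders for a single node.
    meta_template = (
        "instance-id: iid-local1\n"
        "hostname: {hostname}\n"
        "local-hostname: {hostname}\n"
    )
    print(meta_template.format(hostname='cfg01'))
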
diff --git a/tcp_tests/templates/cookied-cicd-k8s-system/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-k8s-system/underlay--user-data1604-swp.yaml
deleted file mode 100644
index 81936a4..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-system/underlay--user-data1604-swp.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - if lvs vg0; then pvresize /dev/vda3; fi
-   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 16G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /usr/share/growlvm/image-layout.yml
-     content: |
-       root:
-         size: '30%VG'
-       home:
-         size: '1G'
-       var_log:
-         size: '11%VG'
-       var_log_audit:
-         size: '5G'
-       var_tmp:
-         size: '11%VG'
-       tmp:
-         size: '5G'
-     owner: root:root
-
-  growpart:
-    mode: auto
-    devices:
-      - '/'
-      - '/dev/vda3'
-    ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-cicd-k8s-system/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-cicd-k8s-system/underlay--user-data1604.yaml
deleted file mode 100644
index 6451e34..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-system/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - if lvs vg0; then pvresize /dev/vda3; fi
-   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   #- fallocate -l 16G /swapfile
-   #- chmod 600 /swapfile
-   #- mkswap /swapfile
-   #- swapon /swapfile
-   #- echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /usr/share/growlvm/image-layout.yml
-     content: |
-       root:
-         size: '30%VG'
-       home:
-         size: '1G'
-       var_log:
-         size: '11%VG'
-       var_log_audit:
-         size: '5G'
-       var_tmp:
-         size: '11%VG'
-       tmp:
-         size: '5G'
-     owner: root:root
-
-  growpart:
-    mode: auto
-    devices:
-      - '/'
-      - '/dev/vda3'
-    ignore_growroot_disabled: false
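
The two user-data templates are intentionally near-identical: the -swp variant creates a 16G swap file, while here the same commands are commented out. A quick, illustrative way to confirm the swap block is the only difference:

    # Compare the '-swp' and plain user-data files removed above.
    import difflib

    with open('underlay--user-data1604-swp.yaml') as f_a, \
         open('underlay--user-data1604.yaml') as f_b:
        print(''.join(difflib.unified_diff(f_a.readlines(), f_b.readlines())))
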
diff --git a/tcp_tests/templates/cookied-cicd-k8s-system/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-system/underlay.yaml
deleted file mode 100644
index 388ae25..0000000
--- a/tcp_tests/templates/cookied-cicd-k8s-system/underlay.yaml
+++ /dev/null
@@ -1,622 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-k8s-system') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
-{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01') %}
-{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02') %}
-{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03') %}
-{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid') %}
-
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01') %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02') %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03') %}
-{% set HOSTNAME_CTL = os_env('HOSTNAME_CTL', 'ctl') %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001') %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002') %}
-{% set HOSTNAME_CMP03 = os_env('HOSTNAME_CMP03', 'cmp003') %}
-{% set HOSTNAME_CMP04 = os_env('HOSTNAME_CMP04', 'cmp004') %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01') %}
-{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02') %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01') %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02') %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03') %}
-{% set HOSTNAME_KVM = os_env('HOSTNAME_KVM', 'kvm') %}
-
-{% import 'cookied-cicd-k8s-system/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-cicd-k8s-system/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'cookied-cicd-k8s-system/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-
-            default_{{ HOSTNAME_CID }}: +90
-            default_{{ HOSTNAME_CID01 }}: +91
-            default_{{ HOSTNAME_CID02 }}: +92
-            default_{{ HOSTNAME_CID03 }}: +93
-            default_{{ HOSTNAME_PRX01 }}: +221
-            default_{{ HOSTNAME_PRX02 }}: +222
-
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-
-            default_{{ HOSTNAME_CID }}: +90
-            default_{{ HOSTNAME_CID01 }}: +91
-            default_{{ HOSTNAME_CID02 }}: +92
-            default_{{ HOSTNAME_CID03 }}: +93
-
-            default_{{ HOSTNAME_CTL }}: +10
-            default_{{ HOSTNAME_CTL01 }}: +11
-            default_{{ HOSTNAME_CTL02 }}: +12
-            default_{{ HOSTNAME_CTL03 }}: +13
-            default_{{ HOSTNAME_CMP01 }}: +101
-            default_{{ HOSTNAME_CMP02 }}: +102
-            default_{{ HOSTNAME_CMP03 }}: +103
-            default_{{ HOSTNAME_CMP04 }}: +104
-            default_{{ HOSTNAME_PRX01 }}: +221
-            default_{{ HOSTNAME_PRX02 }}: +222
-            default_{{ HOSTNAME_KVM }}: +240
-            default_{{ HOSTNAME_KVM01 }}: +241
-            default_{{ HOSTNAME_KVM02 }}: +242
-            default_{{ HOSTNAME_KVM03 }}: +243
-
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-
-            default_{{ HOSTNAME_CID }}: +90
-            default_{{ HOSTNAME_CID01 }}: +91
-            default_{{ HOSTNAME_CID02 }}: +92
-            default_{{ HOSTNAME_CID03 }}: +93
-
-          ip_ranges:
-            dhcp: [+10, -10]
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-
-            default_{{ HOSTNAME_CID }}: +90
-            default_{{ HOSTNAME_CID01 }}: +91
-            default_{{ HOSTNAME_CID02 }}: +92
-            default_{{ HOSTNAME_CID03 }}: +93
-            default_{{ HOSTNAME_CTL01 }}: +11
-            default_{{ HOSTNAME_CTL02 }}: +12
-            default_{{ HOSTNAME_CTL03 }}: +13
-            default_{{ HOSTNAME_CMP01 }}: +101
-            default_{{ HOSTNAME_CMP02 }}: +102
-            default_{{ HOSTNAME_CMP03 }}: +103
-            default_{{ HOSTNAME_CMP04 }}: +104
-            default_{{ HOSTNAME_PRX01 }}: +221
-            default_{{ HOSTNAME_PRX02 }}: +222
-
-          ip_ranges:
-            dhcp: [+10, -10]
-
-
-    groups:
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: {{ os_env('CONNECTION_STRING', 'qemu:///system') }}
-            storage_pool_name: {{ os_env('STORAGE_POOL_NAME', 'default') }}
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: {{ os_env('DRIVER_USE_HOST_CPU', true) }}
-            use_hugepages: {{ os_env('DRIVER_USE_HUGEPAGES', false) }}
-
-        network_pools:
-          admin: admin-pool01
-          private: private-pool01
-          tenant: tenant-pool01
-          external: external-pool01
-
-        l2_network_devices:
-          admin:
-            address_pool: admin-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-          private:
-            address_pool: private-pool01
-            dhcp: false
-            forward:
-              mode: route
-
-          external:
-            address_pool: external-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-
-        group_volumes:
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
-           format: qcow2
-         - name: mcp_ubuntu_1604_image           # Pre-configured image for VCP nodes initially based on kvm nodes.
-           # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2  (preferred)
-           # or
-           # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           source_image: {{ os_env('MCP_IMAGE_PATH1604') }}
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
-              memory: {{ os_env('CFG_NODE_MEMORY', 12288) }}
-              boot:
-                - hd
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: config
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
-                                                            # it will be uploaded after config drive generation
-              interfaces: &all_interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: external
-                  interface_model: *interface_model
-              network_config: &all_network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - external
-
-          - name: {{ HOSTNAME_KVM01 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
-              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: &interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config: &network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_KVM02 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
-              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_KVM03 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
-              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CID01 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
-              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CID02 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
-              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CID03 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
-              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CTL01 }}
-            role: k8s_controller
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CTL02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CTL03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CMP01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CMP02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CMP03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CMP04 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_PRX01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_PRX02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
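
The address pools above use relative offsets: positive values appear to count from the network address and negative ones back from the broadcast address (assumed semantics of the devops address-pool convention). With PRIVATE_ADDRESS_POOL01 at its default 10.60.0.0/16:24, a /24 is carved out and the offsets resolve like this:

    # Assumed offset semantics for the ip_reserved / ip_ranges entries above.
    import ipaddress

    def resolve(net, offset):
        net = ipaddress.ip_network(net)
        base = net.network_address if offset >= 0 else net.broadcast_address
        return ipaddress.ip_address(int(base) + offset)

    print(resolve('10.60.0.0/24', +15))  # 10.60.0.15  (default cfg01)
    print(resolve('10.60.0.0/24', -10))  # 10.60.0.245 (end of the dhcp range)
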
diff --git a/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
deleted file mode 100644
index 9fefeff..0000000
--- a/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ /dev/null
@@ -1,931 +0,0 @@
-default_context:
-  backup_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEogIBAAKCAQEAvDqzt/PHWvSSJrBtvD3UWp21CDrAajgOPeXrVm7VU+sDDpw0
-    YqDvVhYfT/N6ha+SWOK00KyTuMMbB8/I4tvsP9vvCXy7v2AJID/ZO2z/t8KfTDEJ
-    C75/6a0UBg6sl3i7+cUOHbeK+lkcfdnSI1H8Jzdhk4Xj7t7q+MIKTs5n+AlScgyz
-    NSiD2nG/U5LmaQ+rjZ1VsF9J0YTds0yLDF3hztVoDTs7j5obl7Xab3ZlwalgH5Gc
-    Id6BI09jkUbppdPzHpzV2oad7cFpVYTt9m3/MMT0amzPuwl/u/dI64rRuWPe60eT
-    qeVMQD0zP6o9F79upbzQStt82lPJcfF4CXvxYwIDAQABAoIBAAHUXDzUfMKQj/8a
-    RebHfxHmaIUM9SPTKahGXNQ5PY+UQDJbKFMxF0Jx8pn3VuCHxVdh1LoWg1UPaGra
-    BSzXUGOKgrdH5BdHGq+aj0T5mT6zAJNgAqN/lYSy7vfkGp9aSBF0bd+yEgK+7Pz4
-    Kge320iSTDt/2KhQuF30nN8JOI97m2uk2YHH8TixfVtmgLPEy+0Mw4VZLsHD4OY1
-    zu8xN6co2aQR0DB0MPKD6IxH62wSOJKBzF4o5xzzy/fl0ysDZbZ8Z/5Rejvp3yNT
-    68B0X5CM27hVdYE+/tcKGl9WKmewIf3fTZUfBcwFIObMIl9fkK/519nwFed4AfOX
-    /a2LCBECgYEA9Lyl/eyzXuU2dgs6Gw/WMobqOVnHF9wbukS1XSKdwMogtpt7Pb23
-    +32r9xHgeRDvvWwSp8lNPZ8mu77dQ6akbOuOk5C6V3Mqt4zam3DBDMtL63nKq8tq
-    LQ0PVjj8cAgu3GSDCz7htqUb44rn5tX9zlM0vrwxzyYqbet7ZbsGoYsCgYEAxORQ
-    JFn1vwag8VBw3bngx3SJ46CpCC8Gz830W7pEaTS6zTTiDC4p5sATGya91JS8l47G
-    ikP2bcWzvT6aP/u+TZSqZiqp5Kn37fx8Et+ltIl47SH+PJHIR9F9r2f9zqla6mlP
-    zcX/mTSuAJCTP4whQA3/f1wNAeBnewhK9fXCOokCgYAz6TPYSXW+giXsIfOAO/q2
-    GvHhmk5lnDVxbBOAHtCRTQ5lTVN1xCEbQgvQy0TuyQ3hAuRuHH+6u2BO4Gw0Zkto
-    IwrJ+B/eXrpH1qOj5uW73f9Lgjjf+bSau7NuGYZKCSJPcy5smzjrMdhZimQoDWnZ
-    csK0VlzGUJUdXZ599I6ygwKBgGTf+LN3J7H0Snb4WKsw9Zoa+h6WjKO1vE6xXVW1
-    rCEes+o5Autsp2ki1WcexTlp7unTa6MhSNta5Ei8Dzli2FBVL6xihWKzNmRG7Kaa
-    0QIbQMp1lRUhN7Sb/0HkDKRaHktlI07w95Bd7hw59kcjm1F/Gnz9A2kHuNzPFeDI
-    RffJAoGAdeCID5sb0oHEHTIxxB+cgfaiyaAe9qrW2INNWLVn5OTDh6cidatnWAor
-    M/SxwNoiYcCpi869q7wzjw5gNOVoNJbmwzDA7s+lgjTPQpq2jmO6RtweKbYoN5Zw
-    ++LiD3r07TD3p2QAyeooT29D/d6/2Hd6oyTJcZWIQTN+MTcXQO4=
-    -----END RSA PRIVATE KEY-----
-  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8OrO388da9JImsG28PdRanbUIOsBqOA495etWbtVT6wMOnDRioO9WFh9P83qFr5JY4rTQrJO4wxsHz8ji2+w/2+8JfLu/YAkgP9k7bP+3wp9MMQkLvn/prRQGDqyXeLv5xQ4dt4r6WRx92dIjUfwnN2GThePu3ur4wgpOzmf4CVJyDLM1KIPacb9TkuZpD6uNnVWwX0nRhN2zTIsMXeHO1WgNOzuPmhuXtdpvdmXBqWAfkZwh3oEjT2ORRuml0/MenNXahp3twWlVhO32bf8wxPRqbM+7CX+790jritG5Y97rR5Op5UxAPTM/qj0Xv26lvNBK23zaU8lx8XgJe/Fj
-  bmk_enabled: 'False'
-  cicd_control_node01_address: 10.167.11.91
-  cicd_control_node01_hostname: cid01
-  cicd_control_node02_address: 10.167.11.92
-  cicd_control_node02_hostname: cid02
-  cicd_control_node03_address: 10.167.11.93
-  cicd_control_node03_hostname: cid03
-  cicd_control_vip_address: 10.167.11.90
-  cicd_control_vip_hostname: cid
-  cicd_enabled: 'True'
-  cicd_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEowIBAAKCAQEAshiE2vK11KH1/PHO9v5IcT1ol3kuAorv6IgW+1paT9w4pFnd
-    H2DHQxTJsZ629cig+ELVAKHQnkND2U++/DM20ai5ZfpOwlvd+dL95/FbGb62Ozxx
-    kxBjyc/Bbbs8LcZtS1SN+agdkjQG1StpckUbFppoJ9nzWgnEcdYdonQ6aThgd+YL
-    rAOX04s3cMlCflClQl3lGFo24Qdhk/Y4M5rodfqfD5NOSKEhYP/dTMunri8zB5bU
-    ifvOvCWUKUOxLjkx95raY82xMHUobMYk87RcLPcq8pyz96/FPoiLqxM1oznTKNiI
-    0bW0xjf7FFjfLCjTapKZPRz8+Wkvzmzj35LLrwIDAQABAoIBADJoGCo0Kdy93nay
-    JgboX66VV+YPaUNU+aQR6JdJsmgKB4oU2S4JYTyORKveJSCZoV3C5LCiG/6/QRPf
-    q0mMYUaj/51qZCJEiCYuXqjoOgWmYcOQTwD10ZiMEc4yAU1fbQ22J9zyhTQdP5XU
-    DKtH/eu+1h35ZRQl0ZD6rjaNuP6QekJM6IVCC7XBaCG5+wSER9R25HbbDhdb7CwH
-    W1GP9IgISqy9Z3f4PQOyCUmn/O99lN8kry6ui7bCywRfITV6C+pn02DpMgzKZ8jn
-    3yts1f2mIbYVxnahtCaI3QTag6wBsnFq+U0uIXrUGMeeRzg9N1Ur01qdJpIR9g0v
-    Nt7QUZkCgYEA4lEavsFitSll/33JY4q82nYpjXAxTON3xraUqNYn5Cde06gNi8n1
-    t9TCLUqDhSpvgEOyJE/uwo5LAj79Ce2EwLkCttNggqRXBlY5ZpljwfWmxZtuGm/z
-    BJaOtkaK/1diR/+Qn/fTMyPH5JIXuQ6/XF60W4MSbzPgY4GO1BDx+G0CgYEAyXRT
-    00GDdiXbxQmzeHTO9Bg5y36Y1FEWDLnc89bpHPTR4sT/XCczerevy/l8jsdzZlnu
-    5ZddfWMF7EGNo51Zbmi0oLQ7nzigoVFcnhFHRFoCP36T9mvJk7O8Ao3ttpl/J2r0
-    mFiaKi0lhmZVbNpmliKjWAMZJyt6I7AfYekcOwsCgYEA0W8MuQptNgkhgtX80ElL
-    iz9eJK12chjuds3vtG66a8CjWGtkXcB/y6bwKsmR/GHQ3XnIGSJ/aTwU3fc8YzuS
-    ZmbPxDDIVx2OCycv52p7jrqtoqC7u2tuEQji+Hs/lhxfrxEp3V+R6vlpunQX0AF9
-    xRU/ApDBNndjZ7I1YrprseECgYA+zx8HgaiMIJeZokGrb7fKkPcMBCeAItveEeDa
-    wYmito3txv/a6nn5a+XKkbmNBpBrO+To/j1ux33kQDf56Cgm7UxLwoXISa6DPUvE
-    GJ0AqZOD2mIldUu+2k3m+ftAcDEdyBIEobNHLRZDBgriSmGrs5b77NNdzAdjsxjF
-    vRlJKwKBgD8DcP/C9pABC2mRQyH//RTk6XZfiDY0L18lwH7acEdHlJiF1PTwvIHD
-    cj1nMyG2MxEiSt1E5O/YQ4Lo3sognFIb8keu7IYxEgLXhvWFR3RwaYCjrF4ZGfD2
-    +83eUFPZQvEwTY/8OCogzJQfs1CT8+pLdO9tZQbrAaxfmF6c48KN
-    -----END RSA PRIVATE KEY-----
-  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyGITa8rXUofX88c72/khxPWiXeS4Ciu/oiBb7WlpP3DikWd0fYMdDFMmxnrb1yKD4QtUAodCeQ0PZT778MzbRqLll+k7CW9350v3n8VsZvrY7PHGTEGPJz8Ftuzwtxm1LVI35qB2SNAbVK2lyRRsWmmgn2fNaCcRx1h2idDppOGB35gusA5fTizdwyUJ+UKVCXeUYWjbhB2GT9jgzmuh1+p8Pk05IoSFg/91My6euLzMHltSJ+868JZQpQ7EuOTH3mtpjzbEwdShsxiTztFws9yrynLP3r8U+iIurEzWjOdMo2IjRtbTGN/sUWN8sKNNqkpk9HPz5aS/ObOPfksuv
-  cluster_domain: cookied-cicd-ovs-maas.local
-  cluster_name: cookied-cicd-ovs-maas
-  compute_bond_mode: active-backup
-  compute_primary_first_nic: eth1
-  compute_primary_second_nic: eth2
-  context_seed: zEFbUBMME6LFdiL0rJWFgHMdQGgywnDSE9vFYvHgEBeYHb4QJsDl3HqpdaTgqYlF
-  control_network_netmask: 255.255.255.0
-  control_network_subnet: 10.167.11.0/24
-  control_vlan: '2404'
-  cookiecutter_template_branch: proposed
-  cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
-  deploy_network_gateway: 172.16.164.1
-  deploy_network_netmask: 255.255.255.192
-  deploy_network_subnet: 172.16.164.0/26
-  deployment_type: physical
-  dns_server01: 172.18.176.6
-  dns_server02: 172.18.208.44
-  email_address: test@mirantis.com
-  gateway_primary_first_nic: eth1
-  gateway_primary_second_nic: eth2
-  infra_bond_mode: active-backup
-  infra_deploy_nic: eth0
-  infra_kvm01_control_address: 10.167.11.241
-  infra_kvm01_deploy_address: 172.16.164.3
-  infra_kvm01_hostname: kvm01
-  infra_kvm02_control_address: 10.167.11.242
-  infra_kvm02_deploy_address: 172.16.164.4
-  infra_kvm02_hostname: kvm02
-  infra_kvm03_control_address: 10.167.11.243
-  infra_kvm03_deploy_address: 172.16.164.5
-  infra_kvm03_hostname: kvm03
-  infra_kvm04_control_address: 10.167.11.244
-  infra_kvm04_deploy_address: 172.16.164.6
-  infra_kvm04_hostname: kvm04
-  infra_kvm05_control_address: 10.167.11.245
-  infra_kvm05_deploy_address: 172.16.164.7
-  infra_kvm05_hostname: kvm05
-  infra_kvm06_control_address: 10.167.11.246
-  infra_kvm06_deploy_address: 172.16.164.8
-  infra_kvm06_hostname: kvm06
-  infra_kvm_vip_address: 10.167.11.240
-  infra_primary_first_nic: eth1
-  infra_primary_second_nic: eth2
-  kubernetes_enabled: 'False'
-  local_repositories: 'False'
-  maas_enabled: 'True'
-  maas_deploy_address: 172.16.164.2
-  maas_deploy_cidr: 172.16.164.0/26
-  maas_deploy_gateway: 172.16.164.1
-  maas_deploy_range_end: 172.16.164.62
-  maas_deploy_range_start: 172.16.164.18
-  maas_deploy_vlan: '0'
-  maas_dhcp_enabled: 'True'
-  maas_fabric_name: fabric-0
-  maas_hostname: cfg01
-  maas_manage_deploy_network: 'True'
-  maas_machines: |
-        kvm01: # #cz7625-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          # pxe_interface_mac:
-          pxe_interface_mac: "0c:c4:7a:33:24:be"
-          interfaces:
-            one1:
-              mac: "0c:c4:7a:33:24:be"
-              mode: "static"
-              ip: "172.16.164.3"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-              name: one1
-            one2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:33:24:bf"
-              name: one2
-            ten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1e:01:3e"
-              name: ten1
-            ten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1e:01:3f"
-              name: ten2
-            sten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:58:f3:ce"
-              name: sten1
-            sten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:58:f3:cf"
-              name: sten2
-          power_parameters:
-            power_address: "185.8.59.227"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        kvm02: # #cz7627-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:33:2d:6a"
-          interfaces:
-            one1:
-              mac: "0c:c4:7a:33:2d:6a"
-              mode: "static"
-              ip: "172.16.164.4"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-              name: one1
-            one2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:33:2d:6b"
-              name: one2
-            ten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:58:43:b8"
-              name: ten1
-            ten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:58:43:b9"
-              name: ten2
-            sten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1d:96:02"
-              name: sten1
-            sten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1d:96:03"
-              name: sten2
-          power_parameters:
-            power_address: "185.8.59.229"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        kvm03: # #cz7756-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:69:a0:4c"
-          interfaces:
-            one1:
-              mac: "0c:c4:7a:69:a0:4c"
-              mode: "static"
-              ip: "172.16.164.5"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-              name: one1
-            one2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:69:a0:4d"
-              name: one2
-            ten1:
-              mode: UNCONFIGURED
-              mac: "00:25:90:c0:c2:14"
-              name: ten1
-            ten2:
-              mode: UNCONFIGURED
-              mac: "00:25:90:c0:c2:15"
-              name: ten2
-            sten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1e:09:c2"
-              name: sten1
-            sten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1e:09:c3"
-              name: sten2
-          power_parameters:
-            power_address: "5.43.225.88"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        kvm04: # #cz7792-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          # pxe_interface_mac:
-          pxe_interface_mac: "0c:c4:7a:6c:83:5c"
-          interfaces:
-            one1:
-              mac: "0c:c4:7a:6c:83:5c"
-              mode: "static"
-              ip: "172.16.164.6"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-              name: one1
-            one2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:6c:83:5d"
-              name: one2
-            ten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1f:7d:98"
-              name: ten1
-            ten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1f:7d:99"
-              name: ten2
-            sten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1e:03:de"
-              name: sten1
-            sten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1e:03:df"
-              name: sten2
-          power_parameters:
-            power_address: "5.43.225.112"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        kvm05: # #cz7876-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:6c:88:d6"
-          interfaces:
-            one1:
-              mac: "0c:c4:7a:6c:88:d6"
-              mode: "static"
-              ip: "172.16.164.7"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-              name: one1
-            one2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:6c:88:d7"
-              name: one2
-            ten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1e:03:74"
-              name: ten1
-            ten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1e:03:75"
-              name: ten2
-            sten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1f:89:be"
-              name: sten1
-            sten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1f:89:bf"
-              name: sten2
-          power_parameters:
-            power_address: "5.43.225.208"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        kvm06: # #cz8073-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:aa:df:ac"
-          interfaces:
-            one1:
-              mac: "0c:c4:7a:aa:df:ac"
-              mode: "static"
-              ip: "172.16.164.8"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-              name: one1
-            one2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:aa:df:ad"
-              name: one2
-            ten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1e:3a:f2"
-              name: ten1
-            ten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1e:3a:f3"
-              name: ten2
-            sten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1e:a6:4c"
-              name: sten1
-            sten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1e:a6:4d"
-              name: sten2
-          power_parameters:
-            power_address: "5.43.227.118"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        gtw01: # #cz9039-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:aa:d5:84"
-          interfaces:
-            one1:
-              mac: "0c:c4:7a:aa:d5:84"
-              mode: "static"
-              ip: "172.16.164.9"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-              name: one1
-            one2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:aa:d5:85"
-              name: one2
-            ten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:58:41:b0"
-              name: ten1
-            ten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:58:41:b1"
-              name: ten2
-            sten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1d:90:d2"
-              name: sten1
-            sten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1d:90:d3"
-              name: sten2
-          power_parameters:
-            power_address: "5.43.229.28"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        gtw02: # #cz9048-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:aa:d5:82"
-          interfaces:
-            one1:
-              mac: "0c:c4:7a:aa:d5:82"
-              mode: "static"
-              ip: "172.16.164.10"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-              name: one1
-            one2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:aa:d5:83"
-              name: one2
-            ten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1f:00:7c"
-              name: ten1
-            ten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1f:00:7d"
-              name: ten2
-            sten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:bc:88:8a"
-              name: sten1
-            sten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:bc:88:8b"
-              name: sten2
-          power_parameters:
-            power_address: "5.43.225.23"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        gtw03: # #cz8159-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:6c:bc:f6"
-          interfaces:
-            one1:
-              mac: "0c:c4:7a:6c:bc:f6"
-              mode: "static"
-              ip: "172.16.164.11"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-              name: one1
-            one2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:6c:bc:f7"
-              name: one2
-            ten1:
-              mode: UNCONFIGURED
-              mac: "00:25:90:9b:cc:32"
-              name: ten1
-            ten2:
-              mode: UNCONFIGURED
-              mac: "00:25:90:9b:cc:33"
-              name: ten2
-            sten1:
-              mode: UNCONFIGURED
-              mac: "00:25:90:c1:a5:04"
-              name: sten1
-            sten2:
-              mode: UNCONFIGURED
-              mac: "00:25:90:c1:a5:05"
-              name: sten2
-          power_parameters:
-            power_address: "185.8.58.9"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        osd001: # #cz9040-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:aa:c9:02"
-          interfaces:
-            one1:
-              mac: "0c:c4:7a:aa:c9:02"
-              mode: "static"
-              ip: "172.16.164.12"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-              name: one1
-            one2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:aa:c9:03"
-              name: one2
-            ten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1e:aa:90"
-              name: ten1
-            ten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1e:aa:91"
-              name: ten2
-            sten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1e:0a:a4"
-              name: sten1
-            sten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1e:0a:a5"
-              name: sten2
-          power_parameters:
-            power_address: "185.8.58.246"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        osd002: # #cz9041-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:aa:d5:60"
-          interfaces:
-            one1:
-              mac: "0c:c4:7a:aa:d5:60"
-              mode: "static"
-              ip: "172.16.164.13"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-              name: one1
-            one2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:aa:d5:61"
-              name: one2
-            ten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1f:04:2c"
-              name: ten1
-            ten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1f:04:2d"
-              name: ten2
-            sten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1f:01:f2"
-              name: sten1
-            sten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1f:01:f3"
-              name: sten2
-          power_parameters:
-            power_address: "185.8.58.243"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        osd003: # #cz9042-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:aa:c9:3a"
-          interfaces:
-            one1:
-              mac: "0c:c4:7a:aa:c9:3a"
-              mode: "static"
-              ip: "172.16.164.14"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-              name: one1
-            one2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:aa:c9:3b"
-              name: one2
-            ten1:
-              mode: UNCONFIGURED
-              mac: "00:25:90:33:d7:10"
-              name: ten1
-            ten2:
-              mode: UNCONFIGURED
-              mac: "00:25:90:33:d7:11"
-              name: ten2
-            sten1:
-              mode: UNCONFIGURED
-              mac: "00:25:90:0b:5f:50"
-              name: sten1
-            sten2:
-              mode: UNCONFIGURED
-              mac: "00:25:90:0b:5f:51"
-              name: sten2
-          power_parameters:
-            power_address: "185.8.58.244"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        cmp001: # #cz9039-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:aa:d6:aa"
-          interfaces:
-            one1:
-              mac: "0c:c4:7a:aa:d6:aa"
-              mode: "static"
-              ip: "172.16.164.15"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-              name: one1
-            one2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:aa:d6:ab"
-              name: one2
-            ten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1f:86:76"
-              name: ten1
-            ten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1f:86:77"
-              name: ten2
-            sten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1e:39:3c"
-              name: sten1
-            sten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1e:39:3d"
-              name: sten2
-          power_parameters:
-            power_address: "185.8.58.248"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        cmp002: # #cz9046-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:aa:ce:30"
-          interfaces:
-            one1:
-              mac: "0c:c4:7a:aa:ce:30"
-              mode: "static"
-              ip: "172.16.164.16"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-              name: one1
-            one2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:aa:ce:31"
-              name: one2
-            ten1:
-              mode: UNCONFIGURED
-              mac: "00:25:90:e0:7d:e0"
-              name: ten1
-            ten2:
-              mode: UNCONFIGURED
-              mac: "00:25:90:e0:7d:e1"
-              name: ten2
-            sten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1f:0c:0e"
-              name: sten1
-            sten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1f:0c:0f"
-              name: sten2
-          power_parameters:
-            power_address: "185.8.59.222"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-        cmp003: # #cz8061-kvm.host-telecom.com
-          distro_series: "xenial"
-          # hwe_kernel: "hwe-16.04"
-          pxe_interface_mac: "0c:c4:7a:aa:e0:ce"
-          interfaces:
-            one1:
-              mac: "0c:c4:7a:aa:e0:ce"
-              mode: "static"
-              ip: "172.16.164.17"
-              subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
-              gateway: ${_param:deploy_network_gateway}
-              name: one1
-            one2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:aa:e0:cf"
-              name: one2
-            ten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1d:94:5e"
-              name: ten1
-            ten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1d:94:5f"
-              name: ten2
-            sten1:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1f:87:e4"
-              name: sten1
-            sten2:
-              mode: UNCONFIGURED
-              mac: "0c:c4:7a:1f:87:e5"
-              name: sten2
-          power_parameters:
-            power_address: "5.43.225.228"
-            power_pass: ==IPMI_PASS==
-            power_type: ipmi
-            power_user: ==IPMI_USER==
-  mcp_version: proposed
-  mcp_common_scripts_branch: ''
-  offline_deployment: 'False'
-  opencontrail_enabled: 'False'
-  openldap_domain: cookied-cicd-ovs-maas.local
-  openldap_enabled: 'True'
-  openldap_organisation: ${_param:cluster_name}
-  openstack_benchmark_node01_address: 10.167.11.95
-  openstack_benchmark_node01_hostname: bmk01
-  openstack_cluster_size: golden
-  openstack_compute_count: '3'
-  openstack_compute_rack01_hostname: cmp
-  openstack_compute_single_address_ranges: 10.167.11.15-10.167.11.17
-  openstack_compute_deploy_address_ranges: 172.16.164.15-172.16.164.17
-  openstack_compute_tenant_address_ranges: 10.167.12.15-10.167.12.17
-  openstack_compute_backend_address_ranges: 10.167.12.15-10.167.12.17
-  openstack_control_address: 10.167.11.10
-  openstack_control_hostname: ctl
-  openstack_control_node01_address: 10.167.11.11
-  openstack_control_node01_hostname: ctl01
-  openstack_control_node02_address: 10.167.11.12
-  openstack_control_node02_hostname: ctl02
-  openstack_control_node03_address: 10.167.11.13
-  openstack_control_node03_hostname: ctl03
-  openstack_database_address: 10.167.11.50
-  openstack_database_hostname: dbs
-  openstack_database_node01_address: 10.167.11.51
-  openstack_database_node01_hostname: dbs01
-  openstack_database_node02_address: 10.167.11.52
-  openstack_database_node02_hostname: dbs02
-  openstack_database_node03_address: 10.167.11.53
-  openstack_database_node03_hostname: dbs03
-  openstack_enabled: 'True'
-  openstack_gateway_node01_deploy_address: 172.16.164.9
-  openstack_gateway_node02_deploy_address: 172.16.164.10
-  openstack_gateway_node03_deploy_address: 172.16.164.11
-  openstack_gateway_node01_address: 10.167.11.224
-  openstack_gateway_node01_hostname: gtw01
-  openstack_gateway_node02_hostname: gtw02
-  openstack_gateway_node03_hostname: gtw03
-  openstack_gateway_node01_tenant_address: 10.167.12.9
-  openstack_gateway_node02_address: 10.167.11.225
-  openstack_gateway_node02_tenant_address: 10.167.12.10
-  openstack_gateway_node03_address: 10.167.11.226
-  openstack_gateway_node03_tenant_address: 10.167.12.11
-  openstack_message_queue_address: 10.167.11.40
-  openstack_message_queue_hostname: msg
-  openstack_message_queue_node01_address: 10.167.11.41
-  openstack_message_queue_node01_hostname: msg01
-  openstack_message_queue_node02_address: 10.167.11.42
-  openstack_message_queue_node02_hostname: msg02
-  openstack_message_queue_node03_address: 10.167.11.43
-  openstack_message_queue_node03_hostname: msg03
-  openstack_network_engine: ovs
-  openstack_neutron_qos: 'True'
-  openstack_neutron_vlan_aware_vms: 'True'
-  openstack_nfv_dpdk_enabled: 'False'
-  openstack_nfv_sriov_enabled: 'False'
-  openstack_nova_compute_hugepages_count: '16'
-  openstack_nova_compute_nfv_req_enabled: 'False'
-  openstack_nfv_sriov_network: physnet2
-  openstack_nfv_sriov_numvfs: '7'
-  openstack_nfv_sriov_pf_nic: enp5s0f1
-  openstack_nova_cpu_pinning: 6,7,8,9,10,11
-  openstack_nova_compute_reserved_host_memory_mb: '900'
-  openstack_ovs_dvr_enabled: 'True'
-  openstack_ovs_encapsulation_type: vxlan
-  openstack_ovs_encapsulation_vlan_range: 2402:2406
-  openstack_proxy_address: 10.167.11.80
-  openstack_proxy_hostname: prx
-  openstack_proxy_node01_address: 10.167.11.81
-  openstack_proxy_node01_hostname: prx01
-  openstack_proxy_node02_address: 10.167.11.82
-  openstack_proxy_node02_hostname: prx02
-  openstack_upgrade_node01_address: 10.167.11.19
-  openstack_version: pike
-  cinder_version: ${_param:openstack_version}
-  oss_enabled: 'False'
-  oss_node03_address: ${_param:stacklight_monitor_node03_address}
-  platform: openstack_enabled
-  public_host: ${_param:openstack_proxy_address}
-  publication_method: email
-  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
-  salt_api_password: HlcaUHzUnsWsg62uhF8ua5KEbqRbzijz
-  salt_api_password_hash: $6$qdIFillN$XnzP7oIXRcbroVch7nlthyrSekjKlWND8q2MtoMF3Wz2ymepjAOjyqpyR55nmbH9OQzS8EcQJ6sfr5hWKDesV1
-  salt_master_address: 10.167.11.2
-  salt_master_hostname: cfg01
-  salt_master_management_address: 172.16.164.2
-  shared_reclass_branch: proposed
-  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
-  stacklight_enabled: 'True'
-  stacklight_log_address: 10.167.11.60
-  stacklight_log_hostname: log
-  stacklight_log_node01_address: 10.167.11.61
-  stacklight_log_node01_hostname: log01
-  stacklight_log_node02_address: 10.167.11.62
-  stacklight_log_node02_hostname: log02
-  stacklight_log_node03_address: 10.167.11.63
-  stacklight_log_node03_hostname: log03
-  stacklight_long_term_storage_type: prometheus
-  stacklight_monitor_address: 10.167.11.70
-  stacklight_monitor_hostname: mon
-  stacklight_monitor_node01_address: 10.167.11.71
-  stacklight_monitor_node01_hostname: mon01
-  stacklight_monitor_node02_address: 10.167.11.72
-  stacklight_monitor_node02_hostname: mon02
-  stacklight_monitor_node03_address: 10.167.11.73
-  stacklight_monitor_node03_hostname: mon03
-  stacklight_telemetry_address: 10.167.11.96
-  stacklight_telemetry_hostname: mtr
-  stacklight_telemetry_node01_address: 10.167.11.97
-  stacklight_telemetry_node01_hostname: mtr01
-  stacklight_telemetry_node02_address: 10.167.11.98
-  stacklight_telemetry_node02_hostname: mtr02
-  stacklight_telemetry_node03_address: 10.167.11.99
-  stacklight_telemetry_node03_hostname: mtr03
-  stacklight_version: '2'
-  static_ips_on_deploy_network_enabled: 'False'
-  tenant_network_gateway: 10.167.12.1
-  tenant_network_netmask: 255.255.255.0
-  tenant_network_subnet: 10.167.12.0/24
-  tenant_vlan: '2406'
-  upstream_proxy_enabled: 'False'
-  use_default_network_scheme: 'True'
-  sriov_network_subnet: 192.168.10.0/24
-  ceph_enabled: 'True'
-  ceph_version: "luminous"
-  ceph_hyper_converged: "False"
-  ceph_osd_backend: "bluestore"
-  ceph_osd_count: "3"
-  ceph_osd_node_count: 3
-  ceph_osd_block_db_size: 20
-  ceph_osd_journal_size: 20
-  ceph_osd_bond_mode: "active-backup"
-  ceph_osd_data_partition_prefix: ""
-  ceph_public_network_allocation: storage
-  ceph_public_network: "10.167.11.0/24"
-  ceph_cluster_network: "10.167.11.0/24"
-  ceph_osd_single_address_ranges: "10.167.11.200-10.167.11.202"
-  ceph_osd_deploy_address_ranges: "172.16.164.8-172.16.164.10"
-  ceph_osd_storage_address_ranges: "10.167.11.200-10.167.11.202"
-  ceph_osd_backend_address_ranges: "10.167.12.200-10.167.12.202"
-  ceph_osd_data_disks: "/dev/sdb"
-  ceph_osd_journal_or_block_db_disks: "/dev/sdb"
-  ceph_osd_mode: "separated"
-  ceph_osd_deploy_nic: "eth0"
-  ceph_osd_primary_first_nic: "eth1"
-  ceph_osd_primary_second_nic: "eth2"
-  ceph_mon_node01_address: "10.167.11.66"
-  ceph_mon_node01_hostname: "cmn01"
-  ceph_mon_node02_address: "10.167.11.67"
-  ceph_mon_node02_hostname: "cmn02"
-  ceph_mon_node03_address: "10.167.11.68"
-  ceph_mon_node03_hostname: "cmn03"
-  ceph_rgw_address: "10.167.11.75"
-  ceph_rgw_node01_address: "10.167.11.76"
-  ceph_rgw_node01_hostname: "rgw01"
-  ceph_rgw_node02_address: "10.167.11.77"
-  ceph_rgw_node02_hostname: "rgw02"
-  ceph_rgw_node03_address: "10.167.11.78"
-  ceph_rgw_node03_hostname: "rgw03"
-  rsync_fernet_rotation: 'True'
-  compute_padding_with_zeros: True
-  designate_backend: powerdns
-  designate_enabled: 'True'
-  openstack_dns_node01_address: 10.167.11.113
-  openstack_dns_node02_address: 10.167.11.114
-  nova_vnc_tls_enabled: 'True'
-  galera_ssl_enabled: 'True'
-  openstack_mysql_x509_enabled: 'True'
-  rabbitmq_ssl_enabled: 'True'
-  openstack_rabbitmq_x509_enabled: 'True'
-  openstack_internal_protocol: 'https'
-  tenant_telemetry_enabled: 'True'
-  gnocchi_aggregation_storage: ceph
-  openstack_telemetry_address: 10.167.11.83
-  openstack_telemetry_hostname: mdb
-  openstack_telemetry_node01_address: 10.167.11.84
-  openstack_telemetry_node01_hostname: mdb01
-  openstack_telemetry_node02_address: 10.167.11.85
-  openstack_telemetry_node02_hostname: mdb02
-  openstack_telemetry_node03_address: 10.167.11.86
-  openstack_telemetry_node03_hostname: mdb03
-  barbican_backend: dogtag
-  barbican_enabled: 'True'
-  barbican_integration_enabled: 'False'
-  openstack_barbican_address: 10.167.11.44
-  openstack_barbican_hostname: kmn
-  openstack_barbican_node01_address: 10.167.11.45
-  openstack_barbican_node01_hostname: kmn01
-  openstack_barbican_node02_address: 10.167.11.46
-  openstack_barbican_node02_hostname: kmn02
-  openstack_barbican_node03_address: 10.167.11.47
-  openstack_barbican_node03_hostname: kmn03
-  openstack_create_public_network: 'True'
-  openstack_public_neutron_subnet_gateway: 172.17.42.1
-  openstack_public_neutron_subnet_cidr: 172.17.42.0/26
-  openstack_public_neutron_subnet_allocation_start: 172.17.42.15
-  openstack_public_neutron_subnet_allocation_end: 172.17.42.55
-  backend_vlan: '2402'
-  storage_vlan: '2405'  # not implemented yet, placeholder
-  kqueen_custom_mail_enabled: 'False'
-  kqueen_enabled: 'False'
-  manila_enabled: 'False'
-  openscap_enabled: 'True'
-  octavia_health_manager_node01_address: 192.168.1.10
-  octavia_health_manager_node02_address: 192.168.1.11
-  octavia_health_manager_node03_address: 192.168.1.12
-  octavia_manager_cluster: 'False'
-  octavia_hm_bind_ip: 192.168.1.12
-  octavia_lb_mgmt_cidr: 192.168.1.0/24
-  octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
-  octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
-  openstack_octavia_enabled: 'True'
-  octavia_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEpAIBAAKCAQEAtjnPDJsQToHBtoqIo15mdSYpfi8z6DFMi8Gbo0KCN33OUn5u
-    OctbdtjUfeuhvI6px1SCnvyWi09Ft8eWwq+KwLCGKbUxLvqKltuJ7K3LIrGXkt+m
-    qZN4O9XKeVKfZH+mQWkkxRWgX2r8RKNV3GkdNtd74VjhP+R6XSKJQ1Z8b7eHM10v
-    6IjTY/jPczjK+eyCeEj4qbSnV8eKlqLhhquuSQRmUO2DRSjLVdpdf2BB4/BdWFsD
-    YOmX7mb8kpEr9vQ+c1JKMXDwD6ehzyU8kE+1kVm5zOeEy4HdYIMpvUfN49P1anRV
-    2ISQ1ZE+r22IAMKl0tekrGH0e/1NP1DF5rINMwIDAQABAoIBAQCkP/cgpaRNHyg8
-    ISKIHs67SWqdEm73G3ijgB+JSKmW2w7dzJgN//6xYUAnP/zIuM7PnJ0gMQyBBTMS
-    NBTv5spqZLKJZYivj6Tb1Ya8jupKm0jEWlMfBo2ZYVrfgFmrfGOfEebSvmuPlh9M
-    vuzlftmWVSSUOkjODmM9D6QpzgrbpktBuA/WpX+6esMTwJpOcQ5xZWEnHXnVzuTc
-    SncodVweE4gz6F1qorbqIJz8UAUQ5T0OZTdHzIS1IbamACHWaxQfixAO2s4+BoUK
-    ANGGZWkfneCxx7lthvY8DiKn7M5cSRnqFyDToGqaLezdkMNlGC7v3U11FF5blSEW
-    fL1o/HwBAoGBAOavhTr8eqezTchqZvarorFIq7HFWk/l0vguIotu6/wlh1V/KdF+
-    aLLHgPgJ5j+RrCMvTBoKqMeeHfVGrS2udEy8L1mK6b3meG+tMxU05OA55abmhYn7
-    7vF0q8XJmYIHIXmuCgF90R8Piscb0eaMlmHW9unKTKo8EOs5j+D8+AMJAoGBAMo4
-    8WW+D3XiD7fsymsfXalf7VpAt/H834QTbNZJweUWhg11eLutyahyyfjjHV200nNZ
-    cnU09DWKpBbLg7d1pyT69CNLXpNnxuWCt8oiUjhWCUpNqVm2nDJbUdlRFTzYb2fS
-    ZC4r0oQaPD5kMLSipjcwzMWe0PniySxNvKXKInFbAoGBAKxW2qD7uKKKuQSOQUft
-    aAksMmEIAHWKTDdvOA2VG6XvX5DHBLXmy08s7rPfqW06ZjCPCDq4Velzvgvc9koX
-    d/lP6cvqlL9za+x6p5wjPQ4rEt/CfmdcmOE4eY+1EgLrUt314LHGjjG3ScWAiirE
-    QyDrGOIGaYoQf89L3KqIMr0JAoGARYAklw8nSSCUvmXHe+Gf0yKA9M/haG28dCwo
-    780RsqZ3FBEXmYk1EYvCFqQX56jJ25MWX2n/tJcdpifz8Q2ikHcfiTHSI187YI34
-    lKQPFgWb08m1NnwoWrY//yx63BqWz1vjymqNQ5GwutC8XJi5/6Xp+tGGiRuEgJGH
-    EIPUKpkCgYAjBIVMkpNiLCREZ6b+qjrPV96ed3iTUt7TqP7yGlFI/OkORFS38xqC
-    hBP6Fk8iNWuOWQD+ohM/vMMnvIhk5jwlcwn+kF0ra04gi5KBFWSh/ddWMJxUtPC1
-    2htvlEc6zQAR6QfqXHmwhg1hP81JcpqpicQzCMhkzLoR1DC6stXdLg==
-    -----END RSA PRIVATE KEY-----
-  octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2Oc8MmxBOgcG2ioijXmZ1Jil+LzPoMUyLwZujQoI3fc5Sfm45y1t22NR966G8jqnHVIKe/JaLT0W3x5bCr4rAsIYptTEu+oqW24nsrcsisZeS36apk3g71cp5Up9kf6ZBaSTFFaBfavxEo1XcaR0213vhWOE/5HpdIolDVnxvt4czXS/oiNNj+M9zOMr57IJ4SPiptKdXx4qWouGGq65JBGZQ7YNFKMtV2l1/YEHj8F1YWwNg6ZfuZvySkSv29D5zUkoxcPAPp6HPJTyQT7WRWbnM54TLgd1ggym9R83j0/VqdFXYhJDVkT6vbYgAwqXS16SsYfR7/U0/UMXmsg0z  
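Note: the ==IPMI_USER== and ==IPMI_PASS== tokens in the maas_machines section above are placeholders for real BMC credentials and were never stored in the repo. A minimal sketch of one way to substitute them before rendering, assuming shell environment variables and an illustrative filename (the actual substitution step used by the removed pipelines is not part of this diff):

    # Hypothetical rendering step: inject IPMI credentials from the environment
    # (filename and variable names are illustrative assumptions)
    sed "s/==IPMI_USER==/${IPMI_USER}/g; s/==IPMI_PASS==/${IPMI_PASS}/g" \
        salt-context-cookiecutter-openstack_ovs.yaml > /tmp/context.yaml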
diff --git a/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-environment.yaml b/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-environment.yaml
deleted file mode 100644
index 001131c..0000000
--- a/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-environment.yaml
+++ /dev/null
@@ -1,176 +0,0 @@
-nodes:
-    cfg01.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: infra_config_node01
-      roles:
-      - infra_config
-      - linux_system_codename_xenial
-      - features_runtest_cfg
-      interfaces:
-        ens3:
-          role: single_static_mgm
-        ens4:
-          role: single_static_ctl
-
-    # Physical nodes
-    kvm01.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: infra_kvm_node01
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        one1:
-          role: single_mgm_dhcp
-        ten2:
-          role: bond_ctl_lacp
-        sten2:
-          role: bond_ctl_lacp
-
-    kvm02.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: infra_kvm_node02
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        one1:
-          role: single_mgm_dhcp
-        ten2:
-          role: bond_ctl_lacp
-        sten2:
-          role: bond_ctl_lacp
-
-    kvm03.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: infra_kvm_node03
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        one1:
-          role: single_mgm_dhcp
-        ten2:
-          role: bond_ctl_lacp
-        sten2:
-          role: bond_ctl_lacp
-
-    kvm04.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: infra_kvm_node04
-      roles:
-      - infra_kvm_wo_gluster
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        one1:
-          role: single_mgm_dhcp
-        ten2:
-          role: bond_ctl_lacp
-        sten2:
-          role: bond_ctl_lacp
-
-    kvm05.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: infra_kvm_node05
-      roles:
-      - infra_kvm_wo_gluster
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        one1:
-          role: single_mgm_dhcp
-        ten2:
-          role: bond_ctl_lacp
-        sten2:
-          role: bond_ctl_lacp
-
-    kvm06.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: infra_kvm_node06
-      roles:
-      - infra_kvm_wo_gluster
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        one1:
-          role: single_mgm_dhcp
-        ten2:
-          role: bond_ctl_lacp
-        sten2:
-          role: bond_ctl_lacp
-
-    osd<<count>>:
-      reclass_storage_name: ceph_osd_rack01
-      roles:
-      - ceph_osd
-      - linux_system_codename_xenial
-      interfaces:
-        one1:
-          role: single_mgm_dhcp
-        ten2:
-          role: bond_ctl_lacp
-        sten2:
-          role: bond_ctl_lacp
-#          role: bond0_ab_vlan_ceph_storage_backend
-
-    cmp<<count>>:
-      reclass_storage_name: openstack_compute_rack01
-      roles:
-      - openstack_compute
-      - linux_system_codename_xenial
-      interfaces:
-        one1:
-          role: single_mgm_dhcp
-        ten1:
-          role: bond_ctl_lacp
-        ten2:
-          role: bond_prv_lacp
-        sten1:
-          role: bond_ctl_lacp
-        sten2:
-          role: bond_prv_lacp
-
-    gtw01.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_gateway_node01
-      roles:
-      - openstack_gateway
-      - linux_system_codename_xenial
-      interfaces:
-        one1:
-          role: single_mgm_dhcp
-        ten1:
-          role: bond_ctl_lacp
-        ten2:
-          role: bond_prv_lacp
-        sten1:
-          role: bond_ctl_lacp
-        sten2:
-          role: bond_prv_lacp
-
-    gtw02.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_gateway_node02
-      roles:
-      - openstack_gateway
-      - linux_system_codename_xenial
-      interfaces:
-        one1:
-          role: single_mgm_dhcp
-        ten1:
-          role: bond_ctl_lacp
-        ten2:
-          role: bond_prv_lacp
-        sten1:
-          role: bond_ctl_lacp
-        sten2:
-          role: bond_prv_lacp
-
-    gtw03.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_gateway_node03
-      roles:
-      - openstack_gateway
-      - linux_system_codename_xenial
-      interfaces:
-        one1:
-          role: single_mgm_dhcp
-        ten1:
-          role: bond_ctl_lacp
-        ten2:
-          role: bond_prv_lacp
-        sten1:
-          role: bond_ctl_lacp
-        sten2:
-          role: bond_prv_lacp
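Note: entries such as osd<<count>> and cmp<<count>> above are name patterns rather than literal hostnames; the environment generator expands them using openstack_compute_count: '3' and compute_padding_with_zeros: True from the context above, which lines up with the cmp001..cmp003 and osd001..osd003 machines in maas_machines. A one-line sketch of the assumed zero-padded expansion:

    # Expand cmp<<count>> with count=3 and zero padding
    for i in $(seq 1 3); do printf 'cmp%03d\n' "$i"; done   # -> cmp001 cmp002 cmp003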
diff --git a/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-vcp-environment.yaml b/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-vcp-environment.yaml
deleted file mode 100644
index e9461ca..0000000
--- a/tcp_tests/templates/cookied-cicd-ovs-maas/salt-context-vcp-environment.yaml
+++ /dev/null
@@ -1,407 +0,0 @@
-nodes:
-    ctl01.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_control_node01
-      roles:
-      - openstack_control_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    ctl02.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_control_node02
-      roles:
-      - openstack_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    ctl03.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_control_node03
-      roles:
-      - openstack_control
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    dbs01.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_database_node01
-      roles:
-      - openstack_database_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    dbs02.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_database_node02
-      roles:
-      - openstack_database
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    dbs03.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_database_node03
-      roles:
-      - openstack_database
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    msg01.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_message_queue_node01
-      roles:
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    msg02.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_message_queue_node02
-      roles:
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    msg03.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_message_queue_node03
-      roles:
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    prx01.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_proxy_node01
-      roles:
-      - openstack_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    prx02.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_proxy_node02
-      roles:
-      - openstack_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cid01.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: cicd_control_node01
-      roles:
-      - cicd_control_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cid02.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: cicd_control_node02
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cid03.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: cicd_control_node03
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mon01.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: stacklight_server_node01
-      roles:
-      - stacklightv2_server_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mon02.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: stacklight_server_node02
-      roles:
-      - stacklightv2_server
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mon03.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: stacklight_server_node03
-      roles:
-      - stacklightv2_server
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mtr01.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: stacklight_telemetry_node01
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mtr02.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: stacklight_telemetry_node02
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mtr03.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: stacklight_telemetry_node03
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    log01.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: stacklight_log_node01
-      roles:
-      - stacklight_log_leader_v2
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    log02.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: stacklight_log_node02
-      roles:
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    log03.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: stacklight_log_node03
-      roles:
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cmn01.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: ceph_mon_node01
-      roles:
-      - ceph_mon
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cmn02.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: ceph_mon_node02
-      roles:
-      - ceph_mon
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    cmn03.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: ceph_mon_node03
-      roles:
-      - ceph_mon
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    rgw01.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: ceph_rgw_node01
-      roles:
-      - ceph_rgw
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    rgw02.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: ceph_rgw_node02
-      roles:
-      - ceph_rgw
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    rgw03.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: ceph_rgw_node03
-      roles:
-      - ceph_rgw
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mdb01.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_telemetry_node01
-      roles:
-      - linux_system_codename_xenial
-      - openstack_telemetry
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mdb02.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_telemetry_node02
-      roles:
-      - linux_system_codename_xenial
-      - openstack_telemetry
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    mdb03.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_telemetry_node03
-      roles:
-      - linux_system_codename_xenial
-      - openstack_telemetry
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    dns01.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_dns_node01
-      roles:
-      - openstack_dns
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    dns02.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_dns_node02
-      roles:
-      - openstack_dns
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    kmn01.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_barbican_node01
-      roles:
-      - openstack_barbican
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    kmn02.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_barbican_node02
-      roles:
-      - openstack_barbican
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
-
-    kmn03.cookied-cicd-ovs-maas.local:
-      reclass_storage_name: openstack_barbican_node03
-      roles:
-      - openstack_barbican
-      - linux_system_codename_xenial
-      interfaces:
-        ens2:
-          role: single_dhcp
-        ens3:
-          role: single_ctl
diff --git a/tcp_tests/templates/cookied-cicd-ovs-maas/salt.yaml b/tcp_tests/templates/cookied-cicd-ovs-maas/salt.yaml
deleted file mode 100644
index 142d319..0000000
--- a/tcp_tests/templates/cookied-cicd-ovs-maas/salt.yaml
+++ /dev/null
@@ -1,78 +0,0 @@
-{% from 'cookied-cicd-ovs-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-cicd-ovs-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-cicd-ovs-maas/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-- description: Wait for salt-master to be ready after configdrive user-data
-  cmd: |
-    timeout 120 salt-call test.ping
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: Generate a public key for machines in MAAS
-  cmd: |
-    ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
-    maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Run commissioning of BM nodes
-  cmd: |
-    salt-call maas.process_machines
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Wait for machines ready
-  cmd: |
-    salt-call maas.machines_status && timeout 120 salt-call state.sls maas.machines.wait_for_ready
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 7, delay: 5}
-  skip_fail: false
-
-- description: Enforce the interface configuration defined in the model for the servers
-  cmd: |
-    salt-call state.sls maas.machines.assign_ip;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Assign drive partitions to machines
-  cmd: salt-call state.sls maas.machines.storage
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Remove all the salt-minions and re-register the cfg01 minion
-  cmd: |
-    salt-key -y -D;
-    salt-call test.ping
-    sleep 5
-    # Check that the cfg01 is registered
-    salt-key | grep cfg01
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Provision the automatically commissioned physical nodes through MAAS
-  cmd: |
-    salt-call maas.deploy_machines;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-- description: Wait for machines deployed
-  cmd: |
-    salt-call maas.machines_status && timeout 300 salt-call state.sls maas.machines.wait_for_deployed
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 6, delay: 5}
-  skip_fail: false
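Note: the salt.yaml removed above drove the whole MAAS bare-metal lifecycle from cfg01. Condensed to its essential calls (module and state names are verbatim from the steps above; this is only a reference sketch and drops the retry/timeout wrappers the template provided):

    salt-call maas.process_machines                      # commission the BM nodes
    salt-call state.sls maas.machines.wait_for_ready     # block until MAAS reports Ready
    salt-call state.sls maas.machines.assign_ip          # enforce modelled interface IPs
    salt-call state.sls maas.machines.storage            # assign drive partitions
    salt-call maas.deploy_machines                       # provision the commissioned nodes
    salt-call state.sls maas.machines.wait_for_deployed  # block until MAAS reports Deployed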
diff --git a/tcp_tests/templates/cookied-cicd-ovs-maas/underlay.yaml b/tcp_tests/templates/cookied-cicd-ovs-maas/underlay.yaml
deleted file mode 100644
index b4a84ea..0000000
--- a/tcp_tests/templates/cookied-cicd-ovs-maas/underlay.yaml
+++ /dev/null
@@ -1,127 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-ovs-maas') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.164.2') %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'cookied-cicd-ovs-maas_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.164.0/26:26') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
-          ip_ranges:
-              dhcp: [+2, -4]
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.11.0/24:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-          ip_ranges:
-              dhcp: [+2, -4]
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.12.0/24:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-          ip_ranges:
-              dhcp: [+2, -4]
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.0/26:26') }}
-        params:
-          ip_reserved:
-            gateway: '172.17.42.1'
-          ip_ranges:
-              dhcp: ['172.17.42.10', '172.17.42.20']
-
-
-    groups:
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
-        network_pools:
-          admin: admin-pool01
-
-        l2_network_devices:
-          # maas management interface
-          admin:
-            address_pool: admin-pool01
-            dhcp: false
-            forward:
-              mode: bridge
-            parent_iface:
-              phys_dev: !os_env LAB_MANAGEMENT_IFACE
-          private:
-            address_pool: private-pool01
-            dhcp: false
-            forward:
-              mode: bridge
-            parent_iface:
-              phys_dev: !os_env LAB_CONTROL_IFACE
-
-        group_volumes:
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
-              boot:
-                - hd
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: config
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
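Note: the underlay above resolves every tunable through os_env(), so one template served different lab hosts via environment variables. A minimal sketch of the variables a run expected (names are verbatim from the template; the values are illustrative assumptions):

    export LAB_MANAGEMENT_IFACE=enp1s0        # host NIC bridged to the admin/MAAS network
    export LAB_CONTROL_IFACE=enp2s0           # host NIC bridged to the private/control network
    export IMAGE_PATH_CFG01_DAY01=/images/cfg01-day01.qcow2   # pre-built day01 image
    export ENV_NAME=cookied-cicd-ovs-maas_proposed_1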
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
deleted file mode 100644
index e6ed5eb..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
+++ /dev/null
@@ -1,360 +0,0 @@
-default_context:
-  auditd_enabled: 'False'
-  backend_network_netmask: 255.255.255.0
-  backend_network_subnet: 10.167.4.0/24
-  backend_vlan: '10'
-  backup_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEpQIBAAKCAQEAuY7v++mza4e75f80GYE2iIdZ30d7yvT6Xym00iD/OxRWNtXe
-    rIh7M0X30Q0F2D3hVvPz57axTheOK3xFRVvPoIZjm3fVgwNzQmTyfAZz4TOdTtWx
-    9cye8Bo20qlRpq8wFQMSDhgRv0J1iX6LjJsr8pM1A8q3e4GYnv0DrLBZ1Iq7+T/k
-    qzzsT7PuvGvEK63J/DaE6BI73QG+0W0MvblddznwXvXLo/VlBXajWOv37YHiMFMT
-    Zap7lTvGVEyxByVEM04Bo7ABF2PEPwGrGL9nOpJ1LSxBCcryNVyZbveFF/e8A1Cj
-    178rD+W4H5p2Agr5A/y3LZpTkyhnTtWXzwT3YwIDAQABAoIBACiUNa8lgHM3j8PQ
-    d5hMRZy93M2TWGMeB9Lf0AdT5/0HiYMcakHY5vhjiLpS2sBbZ/gYCXLW5Rdq11Bz
-    MMLmPRWhzg6lui+YhZAze0PcNWM+YlxnJy/Vu7xOP0b6eDy3exBdR4mFgfwNkJ6s
-    6d+p34aA4ssdfdqokLPUKQWO21Y7UVYbht6Tv55nd3YMGXHxJ0phitf7/dFsEX9Z
-    sPSdWqkYMP2UWQBrFSjxV9Q+kE8OQ1VYDFCRa/9a5QHMrFo/0dOxLkZosTcCHM8A
-    H2RHPcKrxFWn7A3eAiA4VCvtM8RX239Bi7Gdvfl1HflSkQwBDUV8F2RZLHM2NU2T
-    EGBQcuECgYEA4ZBwZAtJIQ0R35prGLFj+drb/IKr+x2WD9WOZ83cheGSwdCRk/he
-    zZ5cCKgmSqg9cDJ4/vraoqmAlwQ4uj4e1TudgHPwdDUPuwoveIbUfUyzdIZMt0s4
-    fe61AUhEniIOi09H+E2yHz6OWSw3uA4SKkNsMT4RZc4Nag3Fo86Rrj8CgYEA0piY
-    HMYPHposfjVNM0PMU9F1lwQJMdx3a55JYgUc8cMvrsZPzvwJqrGCMNjP4lPwl/AS
-    x73yaxcxEYGiG6wDkvx+hujjyAx+sal62EB9ofJGDI7u8L2/0voW53RWvTUBsy8e
-    +xOQTewCAAYGLIJnGfEyVqEAu9IPwz3pep8xtd0CgYEAruTusDOr9SuMI0M5LQFG
-    UpHnJogvT1smYoqki0osZcZ8ozjT19aps2bJV5EBd7uxP5BzDsl0wtEIuo90aLwH
-    7i/2NIYw9/m4g78nBZ4NnkXdk0karLhvSf3PbPoa8j3X5x6G4DlmFiHL/8pwPY7z
-    eL+kYR4OIVC+R+/7wcJGZMMCgYEAqOLg0epvw53mYoxCTgNoACvw/lupOAhS6MY2
-    mVn6XVOnkKTO6fIrmmziOGQXSq0APAi2NuL4XrNpkV2BcGmhMCY3Hd/0k8CZdcax
-    km0dk1skm/ugWQYCqKIQ7irZSMESjO0UDkwhJKxI6lXqa5VkM2S/dsOFQBp0s6GZ
-    9NFn3y0CgYEAogzKchxouu4BgqHn76W0IB/XeTuiCDSGRv+IwMoghxbPoT6lO920
-    OHWoo+bX3VuxpCFkN2fFH6V8WncUrv4ItAgxGftL8h9BhMRKiatwOBAw0vG/CO2G
-    CIyvmjhIvpIdAl8i1jIJw1sn/ZVYm8+ZKy4VAqPevc3Ze7WGoMUkFyg=
-    -----END RSA PRIVATE KEY-----
-  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5ju/76bNrh7vl/zQZgTaIh1nfR3vK9PpfKbTSIP87FFY21d6siHszRffRDQXYPeFW8/PntrFOF44rfEVFW8+ghmObd9WDA3NCZPJ8BnPhM51O1bH1zJ7wGjbSqVGmrzAVAxIOGBG/QnWJfouMmyvykzUDyrd7gZie/QOssFnUirv5P+SrPOxPs+68a8Qrrcn8NoToEjvdAb7RbQy9uV13OfBe9cuj9WUFdqNY6/ftgeIwUxNlqnuVO8ZUTLEHJUQzTgGjsAEXY8Q/AasYv2c6knUtLEEJyvI1XJlu94UX97wDUKPXvysP5bgfmnYCCvkD/LctmlOTKGdO1ZfPBPdj
-  bmk_enabled: 'False'
-  ceph_cluster_network: 10.167.4.0/24
-  ceph_enabled: 'True'
-  ceph_hyper_converged: 'False'
-  ceph_mon_node01_address: 10.167.4.66
-  ceph_mon_node01_hostname: cmn01
-  ceph_mon_node02_address: 10.167.4.67
-  ceph_mon_node02_hostname: cmn02
-  ceph_mon_node03_address: 10.167.4.68
-  ceph_mon_node03_hostname: cmn03
-  ceph_osd_backend: bluestore
-  ceph_osd_block_db_size: '10'
-  ceph_osd_bond_mode: active-backup
-  ceph_osd_count: '3'
-  ceph_osd_data_disks: /dev/vdb
-  ceph_osd_journal_or_block_db_disks: /dev/vdc
-  ceph_osd_node_count: '3'
-  ceph_osd_journal_size: '10'
-  ceph_osd_primary_first_nic: eth1
-  ceph_osd_primary_second_nic: eth2
-  ceph_osd_rack01_backend_subnet: 10.167.4
-  ceph_osd_rack01_hostname: osd
-  ceph_osd_rack01_single_subnet: 10.167.4
-  ceph_osd_single_address_ranges: 10.167.4.87-10.167.4.89
-  ceph_osd_deploy_address_ranges: 10.167.5.87-10.167.5.89
-  ceph_osd_backend_address_ranges: 10.167.4.87-10.167.4.89
-  ceph_public_network: 10.167.4.0/24
-  ceph_rgw_address: 10.167.4.75
-  ceph_rgw_hostname: rgw
-  ceph_rgw_node01_address: 10.167.4.76
-  ceph_rgw_node01_hostname: rgw01
-  ceph_rgw_node02_address: 10.167.4.77
-  ceph_rgw_node02_hostname: rgw02
-  ceph_rgw_node03_address: 10.167.4.78
-  ceph_rgw_node03_hostname: rgw03
-  ceph_version: luminous
-  cicd_control_node01_address: 10.167.4.91
-  cicd_control_node01_hostname: cid01
-  cicd_control_node02_address: 10.167.4.92
-  cicd_control_node02_hostname: cid02
-  cicd_control_node03_address: 10.167.4.93
-  cicd_control_node03_hostname: cid03
-  cicd_control_vip_address: 10.167.4.90
-  cicd_control_vip_hostname: cid
-  cicd_enabled: 'True'
-  cicd_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
-    3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
-    AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
-    xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
-    B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
-    q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
-    s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
-    V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
-    9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
-    pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
-    MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
-    7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
-    udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
-    R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
-    XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
-    Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
-    KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
-    6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
-    ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
-    ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
-    Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
-    r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
-    mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
-    qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
-    9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
-    -----END RSA PRIVATE KEY-----
-  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
-  cluster_domain: cookied-cicd-pike-dvr-ceph.local
-  cluster_name: cookied-cicd-pike-dvr-ceph
-  compute_bond_mode: active-backup
-  compute_padding_with_zeros: 'False'
-  compute_primary_first_nic: eth1
-  compute_primary_second_nic: eth2
-  context_seed: tekHhhWzn3YrxKbXGMvtWYj1usHGrRBYd2gfFwWNCnRentwCu1QKANHvpIeZCRvz
-  control_network_netmask: 255.255.255.0
-  control_network_subnet: 10.167.4.0/24
-  control_vlan: '10'
-  cookiecutter_template_branch: 'proposed'
-  jenkins_pipelines_branch: 'release/2019.2.0'
-  cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
-  deploy_network_gateway: 10.167.5.1
-  deploy_network_netmask: 255.255.255.0
-  deploy_network_subnet: 10.167.5.0/24
-  deployment_type: physical
-  dns_server01: 172.18.176.6
-  dns_server02: 172.18.208.44
-  email_address: test@mirantis.com
-  gainsight_service_enabled: 'False'
-  gateway_primary_first_nic: eth1
-  gateway_primary_second_nic: eth2
-  gnocchi_aggregation_storage: ceph
-  infra_bond_mode: active-backup
-  infra_deploy_nic: eth0
-  infra_kvm01_control_address: 10.167.4.11
-  infra_kvm01_deploy_address: 10.167.5.11
-  infra_kvm01_hostname: kvm01
-  infra_kvm02_control_address: 10.167.4.12
-  infra_kvm02_deploy_address: 10.167.5.12
-  infra_kvm02_hostname: kvm02
-  infra_kvm03_control_address: 10.167.4.13
-  infra_kvm03_deploy_address: 10.167.5.13
-  infra_kvm03_hostname: kvm03
-  infra_kvm_vip_address: 10.167.4.10
-  infra_primary_first_nic: eth1
-  infra_primary_second_nic: eth2
-  internal_proxy_enabled: 'False'
-  kubernetes_ctl_on_kvm: 'False'
-  kubernetes_enabled: 'False'
-  local_repositories: 'False'
-  maas_deploy_address: 10.167.5.15
-  maas_deploy_network_name: deploy_network
-  maas_deploy_range_end: 10.167.5.230
-  maas_deploy_range_start: 10.167.5.20
-  maas_deploy_vlan: '0'
-  maas_enabled: 'False'
-  maas_fabric_name: deploy_fabric
-  maas_hostname: cfg01
-  mcp_common_scripts_branch: 'proposed'
-  mcp_version: proposed
-  no_platform: 'False'
-  offline_deployment: 'False'
-  opencontrail_enabled: 'False'
-  openldap_domain: ${_param:cluster_name}.local
-  openldap_enabled: 'True'
-  openldap_organisation: ${_param:cluster_name}
-  openssh_groups: cicd
-  openstack_benchmark_node01_address: 10.167.4.95
-  openstack_benchmark_node01_hostname: bmk01
-  openstack_cluster_size: compact
-  openstack_compute_count: '2'
-  openstack_compute_rack01_hostname: cmp
-  openstack_compute_rack01_single_subnet: 10.167.4
-  openstack_compute_rack01_tenant_subnet: 10.167.6
-  openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
-  openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
-  openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
-  openstack_control_address: 10.167.4.10
-  openstack_control_hostname: ctl
-  openstack_control_node01_address: 10.167.4.11
-  openstack_control_node01_hostname: ctl01
-  openstack_control_node02_address: 10.167.4.12
-  openstack_control_node02_hostname: ctl02
-  openstack_control_node03_address: 10.167.4.13
-  openstack_control_node03_hostname: ctl03
-  openstack_database_address: 10.167.4.10
-  openstack_database_hostname: ctl
-  openstack_database_node01_address: 10.167.4.11
-  openstack_database_node01_hostname: ctl01
-  openstack_database_node02_address: 10.167.4.12
-  openstack_database_node02_hostname: ctl02
-  openstack_database_node03_address: 10.167.4.13
-  openstack_database_node03_hostname: ctl03
-  openstack_enabled: 'True'
-  openstack_gateway_node01_address: 10.167.4.224
-  openstack_gateway_node01_hostname: gtw01
-  openstack_gateway_node01_tenant_address: 10.167.6.6
-  openstack_gateway_node02_address: 10.167.4.225
-  openstack_gateway_node02_hostname: gtw02
-  openstack_gateway_node02_tenant_address: 10.167.6.7
-  openstack_gateway_node03_address: 10.167.4.226
-  openstack_gateway_node03_hostname: gtw03
-  openstack_gateway_node03_tenant_address: 10.167.6.8
-  openstack_message_queue_address: 10.167.4.10
-  openstack_message_queue_hostname: ctl
-  openstack_message_queue_node01_address: 10.167.4.11
-  openstack_message_queue_node01_hostname: ctl01
-  openstack_message_queue_node02_address: 10.167.4.12
-  openstack_message_queue_node02_hostname: ctl02
-  openstack_message_queue_node03_address: 10.167.4.13
-  openstack_message_queue_node03_hostname: ctl03
-  openstack_network_engine: ovs
-  openstack_neutron_bgp_vpn: 'False'
-  openstack_neutron_bgp_vpn_driver: bagpipe
-  openstack_neutron_qos: 'False'
-  openstack_neutron_vlan_aware_vms: 'False'
-  openstack_nfv_dpdk_enabled: 'False'
-  openstack_nfv_sriov_enabled: 'False'
-  openstack_nova_compute_nfv_req_enabled: 'False'
-  openstack_nova_compute_reserved_host_memory_mb: '900'
-  openstack_ovs_dvr_enabled: 'True'
-  openstack_ovs_encapsulation_type: vxlan
-  openstack_proxy_address: 172.17.16.80  # external network endpoint
-  openstack_proxy_vip_interface: ens5
-  openstack_proxy_hostname: prx
-  openstack_proxy_node01_address: 10.167.4.121
-  openstack_proxy_node01_hostname: prx01
-  openstack_proxy_node02_address: 10.167.4.122
-  openstack_proxy_node02_hostname: prx02
-  openstack_upgrade_node01_address: 10.167.4.19
-  openstack_version: pike
-  osd_padding_with_zeros: 'False'
-  oss_enabled: 'False'
-  platform: openstack_enabled
-  public_host: ${_param:openstack_proxy_address}
-  publication_method: email
-  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
-  salt_api_password: BX7ium4MaRPIWBdyhj4LTbiedwg3yLep
-  salt_api_password_hash: $6$qYqzkiRP$MiqA5ZMfsmdXJcuTTyeCgNPv9CBGO5nSH4HwRKPGUh0MFXcEa8JDCUEtS8xLHCkol7CMdq.l6CG7of0iaUJ.u.
-  salt_master_address: 10.167.4.15
-  salt_master_hostname: cfg01
-  salt_master_management_address: 10.167.5.15
-  shared_reclass_branch: 'proposed'
-  shared_reclass_url: https://github.com/Mirantis/reclass-system-salt-model.git
-  sriov_network_subnet: 10.55.0.0/16
-  static_ips_on_deploy_network_enabled: 'False'
-  tenant_network_gateway: 10.167.6.1
-  tenant_network_netmask: 255.255.255.0
-  tenant_network_subnet: 10.167.6.0/24
-  tenant_telemetry_enabled: 'False'
-  tenant_vlan: '20'
-  upstream_proxy_enabled: 'False'
-  use_default_network_scheme: 'True'
-  version: proposed
-  vnf_onboarding_enabled: 'False'
-  openstack_telemetry_address: 10.167.4.83
-  openstack_telemetry_hostname: mdb
-  openstack_telemetry_node01_address: 10.167.4.84
-  openstack_telemetry_node01_hostname: mdb01
-  openstack_telemetry_node02_address: 10.167.4.85
-  openstack_telemetry_node02_hostname: mdb02
-  openstack_telemetry_node03_address: 10.167.4.86
-  openstack_telemetry_node03_hostname: mdb03
-  fluentd_enabled: 'True'
-  stacklight_enabled: 'True'
-  stacklight_log_address: 10.167.4.60
-  stacklight_log_hostname: log
-  stacklight_log_node01_address: 10.167.4.61
-  stacklight_log_node01_hostname: log01
-  stacklight_log_node02_address: 10.167.4.62
-  stacklight_log_node02_hostname: log02
-  stacklight_log_node03_address: 10.167.4.63
-  stacklight_log_node03_hostname: log03
-  stacklight_monitor_address: 10.167.4.70
-  stacklight_monitor_hostname: mon
-  stacklight_monitor_node01_address: 10.167.4.71
-  stacklight_monitor_node01_hostname: mon01
-  stacklight_monitor_node02_address: 10.167.4.72
-  stacklight_monitor_node02_hostname: mon02
-  stacklight_monitor_node03_address: 10.167.4.73
-  stacklight_monitor_node03_hostname: mon03
-  stacklight_telemetry_address: 10.167.4.50
-  stacklight_telemetry_hostname: mtr
-  stacklight_telemetry_node01_address: 10.167.4.51
-  stacklight_telemetry_node01_hostname: mtr01
-  stacklight_telemetry_node02_address: 10.167.4.52
-  stacklight_telemetry_node02_hostname: mtr02
-  stacklight_telemetry_node03_address: 10.167.4.53
-  stacklight_telemetry_node03_hostname: mtr03
-  stacklight_version: '2'
-  stacklight_long_term_storage_type: prometheus
-  nova_vnc_tls_enabled: 'True'
-  galera_ssl_enabled: 'True'
-  openstack_mysql_x509_enabled: 'True'
-  rabbitmq_ssl_enabled: 'True'
-  openstack_rabbitmq_x509_enabled: 'True'
-  openstack_internal_protocol: 'https'
-  openstack_create_public_network: 'True'
-  openstack_public_neutron_subnet_gateway: 172.17.16.1
-  openstack_public_neutron_subnet_cidr: 172.17.16.0/24
-  openstack_public_neutron_subnet_allocation_start: 172.17.16.201
-  openstack_public_neutron_subnet_allocation_end: 172.17.16.245
-  manila_enabled: 'False'
-  barbican_enabled: 'True'
-  barbican_integration_enabled: 'False'
-
-  openstack_barbican_address: 10.167.4.44
-  openstack_barbican_hostname: kmn
-  openstack_barbican_node01_address: 10.167.4.45
-  openstack_barbican_node01_hostname: kmn01
-  openstack_barbican_node02_address: 10.167.4.46
-  openstack_barbican_node02_hostname: kmn02
-  openstack_barbican_node03_address: 10.167.4.47
-  openstack_barbican_node03_hostname: kmn03
-
-  designate_backend: powerdns
-  designate_enabled: 'True'
-  openstack_dns_node01_address: 10.167.4.113
-  openstack_dns_node02_address: 10.167.4.114
-  octavia_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEpAIBAAKCAQEAtjnPDJsQToHBtoqIo15mdSYpfi8z6DFMi8Gbo0KCN33OUn5u
-    OctbdtjUfeuhvI6px1SCnvyWi09Ft8eWwq+KwLCGKbUxLvqKltuJ7K3LIrGXkt+m
-    qZN4O9XKeVKfZH+mQWkkxRWgX2r8RKNV3GkdNtd74VjhP+R6XSKJQ1Z8b7eHM10v
-    6IjTY/jPczjK+eyCeEj4qbSnV8eKlqLhhquuSQRmUO2DRSjLVdpdf2BB4/BdWFsD
-    YOmX7mb8kpEr9vQ+c1JKMXDwD6ehzyU8kE+1kVm5zOeEy4HdYIMpvUfN49P1anRV
-    2ISQ1ZE+r22IAMKl0tekrGH0e/1NP1DF5rINMwIDAQABAoIBAQCkP/cgpaRNHyg8
-    ISKIHs67SWqdEm73G3ijgB+JSKmW2w7dzJgN//6xYUAnP/zIuM7PnJ0gMQyBBTMS
-    NBTv5spqZLKJZYivj6Tb1Ya8jupKm0jEWlMfBo2ZYVrfgFmrfGOfEebSvmuPlh9M
-    vuzlftmWVSSUOkjODmM9D6QpzgrbpktBuA/WpX+6esMTwJpOcQ5xZWEnHXnVzuTc
-    SncodVweE4gz6F1qorbqIJz8UAUQ5T0OZTdHzIS1IbamACHWaxQfixAO2s4+BoUK
-    ANGGZWkfneCxx7lthvY8DiKn7M5cSRnqFyDToGqaLezdkMNlGC7v3U11FF5blSEW
-    fL1o/HwBAoGBAOavhTr8eqezTchqZvarorFIq7HFWk/l0vguIotu6/wlh1V/KdF+
-    aLLHgPgJ5j+RrCMvTBoKqMeeHfVGrS2udEy8L1mK6b3meG+tMxU05OA55abmhYn7
-    7vF0q8XJmYIHIXmuCgF90R8Piscb0eaMlmHW9unKTKo8EOs5j+D8+AMJAoGBAMo4
-    8WW+D3XiD7fsymsfXalf7VpAt/H834QTbNZJweUWhg11eLutyahyyfjjHV200nNZ
-    cnU09DWKpBbLg7d1pyT69CNLXpNnxuWCt8oiUjhWCUpNqVm2nDJbUdlRFTzYb2fS
-    ZC4r0oQaPD5kMLSipjcwzMWe0PniySxNvKXKInFbAoGBAKxW2qD7uKKKuQSOQUft
-    aAksMmEIAHWKTDdvOA2VG6XvX5DHBLXmy08s7rPfqW06ZjCPCDq4Velzvgvc9koX
-    d/lP6cvqlL9za+x6p5wjPQ4rEt/CfmdcmOE4eY+1EgLrUt314LHGjjG3ScWAiirE
-    QyDrGOIGaYoQf89L3KqIMr0JAoGARYAklw8nSSCUvmXHe+Gf0yKA9M/haG28dCwo
-    780RsqZ3FBEXmYk1EYvCFqQX56jJ25MWX2n/tJcdpifz8Q2ikHcfiTHSI187YI34
-    lKQPFgWb08m1NnwoWrY//yx63BqWz1vjymqNQ5GwutC8XJi5/6Xp+tGGiRuEgJGH
-    EIPUKpkCgYAjBIVMkpNiLCREZ6b+qjrPV96ed3iTUt7TqP7yGlFI/OkORFS38xqC
-    hBP6Fk8iNWuOWQD+ohM/vMMnvIhk5jwlcwn+kF0ra04gi5KBFWSh/ddWMJxUtPC1
-    2htvlEc6zQAR6QfqXHmwhg1hP81JcpqpicQzCMhkzLoR1DC6stXdLg==
-    -----END RSA PRIVATE KEY-----
-  octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2Oc8MmxBOgcG2ioijXmZ1Jil+LzPoMUyLwZujQoI3fc5Sfm45y1t22NR966G8jqnHVIKe/JaLT0W3x5bCr4rAsIYptTEu+oqW24nsrcsisZeS36apk3g71cp5Up9kf6ZBaSTFFaBfavxEo1XcaR0213vhWOE/5HpdIolDVnxvt4czXS/oiNNj+M9zOMr57IJ4SPiptKdXx4qWouGGq65JBGZQ7YNFKMtV2l1/YEHj8F1YWwNg6ZfuZvySkSv29D5zUkoxcPAPp6HPJTyQT7WRWbnM54TLgd1ggym9R83j0/VqdFXYhJDVkT6vbYgAwqXS16SsYfR7/U0/UMXmsg0z
-  openstack_octavia_enabled: 'True'
-  octavia_health_manager_node01_address: 192.168.1.10
-  octavia_health_manager_node02_address: 192.168.1.11
-  octavia_health_manager_node03_address: 192.168.1.12
-  octavia_manager_cluster: 'True'
-  octavia_hm_bind_ip: 192.168.1.12
-  octavia_lb_mgmt_cidr: 192.168.1.0/24
-  octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
-  octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
-  cinder_backup_engine: 'ceph'
-  cinder_ceph_backup_pool_name: 'backups'
\ No newline at end of file
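
Note: the context above pins per-role "start-end" address ranges (for example ceph_osd_single_address_ranges) that must stay inside their matching subnets. An illustrative consistency check, using values copied from the deleted context:

    import ipaddress

    def range_in_subnet(addr_range, subnet):
        # "start-end" ranges as used by the *_address_ranges keys above.
        start_s, end_s = addr_range.split("-")
        net = ipaddress.ip_network(subnet)
        start = ipaddress.ip_address(start_s)
        end = ipaddress.ip_address(end_s)
        return start in net and end in net and start <= end

    # ceph_osd_single_address_ranges vs control_network_subnet
    assert range_in_subnet("10.167.4.87-10.167.4.89", "10.167.4.0/24")
    # openstack_compute_deploy_address_ranges vs deploy_network_subnet
    assert range_in_subnet("10.167.5.105-10.167.5.106", "10.167.5.0/24")
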
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/environment_context.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/environment_context.yaml
deleted file mode 100644
index 8974e6e..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/environment_context.yaml
+++ /dev/null
@@ -1,380 +0,0 @@
-nodes:
-    cfg01:
-      reclass_storage_name: infra_config_node01
-      roles:
-      - infra_config
-      - linux_system_codename_xenial
-      - features_runtest_cfg
-      #classes:
-      #- environment.cookied-cicd-pike-dvr-ceph.override_ntp_virtual
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_static_ctl
-
-    cid01:
-      reclass_storage_name: cicd_control_node01
-      roles:
-      - cicd_control_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cid02:
-      reclass_storage_name: cicd_control_node02
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cid03:
-      reclass_storage_name: cicd_control_node03
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    ctl01:
-      reclass_storage_name: openstack_control_node01
-      roles:
-      - infra_kvm
-      - openstack_control_leader
-      - openstack_database_leader
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    ctl02:
-      reclass_storage_name: openstack_control_node02
-      roles:
-      - infra_kvm
-      - openstack_control
-      - openstack_database
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    ctl03:
-      reclass_storage_name: openstack_control_node03
-      roles:
-      - infra_kvm
-      - openstack_control
-      - openstack_database
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    prx01:
-      reclass_storage_name: openstack_proxy_node01
-      roles:
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: single_external
-          external_address: 172.17.16.121
-          external_network_netmask: 255.255.255.0
-
-    prx02:
-      reclass_storage_name: openstack_proxy_node02
-      roles:
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: single_external
-          external_address: 172.17.16.122
-          external_network_netmask: 255.255.255.0
-
-    # Generator-based compute nodes. For compatibility only.
-    cmp<<count>>:
-      reclass_storage_name: openstack_compute_rack01
-      roles:
-      - openstack_compute
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: bond0_ab_ovs_vxlan_mesh
-        ens6:
-          role: bond1_ab_ovs_floating
-
-    gtw01:
-      reclass_storage_name: openstack_gateway_node01
-      roles:
-      - openstack_gateway
-      - linux_system_codename_xenial
-      classes:
-      - system.linux.system.repo.mcp.apt_mirantis.docker
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: bond0_ab_ovs_vxlan_mesh
-        ens6:
-          role: bond1_ab_ovs_floating
-
-    osd<<count>>:
-      reclass_storage_name: ceph_osd_rack01
-      roles:
-      - ceph_osd
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cmn01:
-      reclass_storage_name: ceph_mon_node01
-      roles:
-      - ceph_mon
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cmn02:
-      reclass_storage_name: ceph_mon_node02
-      roles:
-      - ceph_mon
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cmn03:
-      reclass_storage_name: ceph_mon_node03
-      roles:
-      - ceph_mon
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    rgw01:
-      reclass_storage_name: ceph_rgw_node01
-      roles:
-      - ceph_rgw
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    rgw02:
-      reclass_storage_name: ceph_rgw_node02
-      roles:
-      - ceph_rgw
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    rgw03:
-      reclass_storage_name: ceph_rgw_node03
-      roles:
-      - ceph_rgw
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mon01:
-      reclass_storage_name: stacklight_server_node01
-      roles:
-      - stacklightv2_server_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mon02:
-      reclass_storage_name: stacklight_server_node02
-      roles:
-      - stacklightv2_server
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mon03:
-      reclass_storage_name: stacklight_server_node03
-      roles:
-      - stacklightv2_server
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    log01:
-      reclass_storage_name: stacklight_log_node01
-      roles:
-      - stacklight_log_leader_v2
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    log02:
-      reclass_storage_name: stacklight_log_node02
-      roles:
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    log03:
-      reclass_storage_name: stacklight_log_node03
-      roles:
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mtr01:
-      reclass_storage_name: stacklight_telemetry_node01
-      roles:
-      - stacklight_telemetry_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mtr02:
-      reclass_storage_name: stacklight_telemetry_node02
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mtr03:
-      reclass_storage_name: stacklight_telemetry_node03
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-    kmn01:
-      reclass_storage_name: openstack_barbican_node01
-      roles:
-      - openstack_barbican
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    kmn02:
-      reclass_storage_name: openstack_barbican_node02
-      roles:
-      - openstack_barbican
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    kmn03:
-      reclass_storage_name: openstack_barbican_node03
-      roles:
-      - openstack_barbican
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    dns01:
-      reclass_storage_name: openstack_dns_node01
-      roles:
-      - openstack_dns
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    dns02:
-      reclass_storage_name: openstack_dns_node02
-      roles:
-      - openstack_dns
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
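
Note: node names like cmp<<count>> and osd<<count>> above are generator patterns expanded from the counts in the cookiecutter context (openstack_compute_count: '2', ceph_osd_count: '3'). A sketch of the expansion, assuming a 1-based index with no zero padding (compute_padding_with_zeros is 'False' in this context); the resulting names match the cmp1/cmp2 and osd1..osd3 hostnames in the underlay below:

    def expand(pattern, count):
        # Replace the <<count>> placeholder with a 1-based index;
        # zero padding is skipped since *_padding_with_zeros is 'False'.
        return [pattern.replace("<<count>>", str(i)) for i in range(1, count + 1)]

    print(expand("cmp<<count>>", 2))  # ['cmp1', 'cmp2']
    print(expand("osd<<count>>", 3))  # ['osd1', 'osd2', 'osd3']
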
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/salt.yaml
deleted file mode 100644
index 4b9c68c..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/salt.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-{% from 'cookied-cicd-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-cicd-pike-dvr-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-cicd-pike-dvr-ceph/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for the other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
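
Note: the os_env('NAME', default) helper used throughout these templates resolves an environment variable with a fallback. A minimal stand-in with the same behaviour (the real helper is provided by the tcp-qa template engine):

    import os

    def os_env(name, default=None):
        # Environment variable lookup with a default, mirroring the
        # os_env() calls in the Jinja templates.
        return os.environ.get(name, default)

    print(os_env("SALT_MODELS_REPOSITORY",
                 "https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab"))
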
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
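
Note: the meta-data above is stored as a plain string with {hostname}-style placeholders. A minimal sketch of per-node rendering via str.format-style substitution (the actual tcp-qa rendering code may differ):

    meta_data_tpl = (
        "instance-id: iid-local1\n"
        "hostname: {hostname}\n"
        "local-hostname: {hostname}\n"
    )
    # Hypothetical node name, for illustration only.
    print(meta_data_tpl.format(hostname="ctl01.cookied-cicd-pike-dvr-ceph.local"))
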
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay--user-data1604.yaml
deleted file mode 100644
index 81936a4..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - if lvs vg0; then pvresize /dev/vda3; fi
-   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
-   - export TERM=linux
-   - export LANG=C
-   # Configure name resolution via resolvconf
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 16G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /usr/share/growlvm/image-layout.yml
-     content: |
-       root:
-         size: '30%VG'
-       home:
-         size: '1G'
-       var_log:
-         size: '11%VG'
-       var_log_audit:
-         size: '5G'
-       var_tmp:
-         size: '11%VG'
-       tmp:
-         size: '5G'
-     owner: root:root
-
-  growpart:
-    mode: auto
-    devices:
-      - '/'
-      - '/dev/vda3'
-    ignore_growroot_disabled: false
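
Note: the growlvm image-layout above mixes fixed sizes ("5G") with volume-group percentages ("11%VG"). An illustrative helper resolving that layout for a given VG size (semantics inferred from the layout file itself, not taken from growlvm.py):

    LAYOUT = {  # copied from /usr/share/growlvm/image-layout.yml above
        "root": "30%VG", "home": "1G", "var_log": "11%VG",
        "var_log_audit": "5G", "var_tmp": "11%VG", "tmp": "5G",
    }

    def resolve_size(spec, vg_gib):
        # "<n>%VG" is a share of the volume group; "<n>G" is absolute GiB.
        if spec.endswith("%VG"):
            return vg_gib * float(spec[:-3]) / 100.0
        return float(spec.rstrip("G"))

    vg = 150.0  # NODE_VOLUME_SIZE default used by these nodes
    sizes = {lv: resolve_size(spec, vg) for lv, spec in LAYOUT.items()}
    print(sizes)                      # per-LV sizes in GiB
    assert sum(sizes.values()) <= vg  # the layout must fit inside the VG
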
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay.yaml
deleted file mode 100644
index c0eab42..0000000
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay.yaml
+++ /dev/null
@@ -1,1288 +0,0 @@
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-
-{% import 'cookied-cicd-pike-dvr-ceph/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-cicd-pike-dvr-ceph/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-pike-dvr-ceph') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW01 = os_env('HOSTNAME_RGW01', 'rgw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW02 = os_env('HOSTNAME_RGW02', 'rgw02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW03 = os_env('HOSTNAME_RGW03', 'rgw03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd1.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd2.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD03 = os_env('HOSTNAME_OSD03', 'osd3.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KMN01 = os_env('HOSTNAME_KMN01', 'kmn01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KMN02 = os_env('HOSTNAME_KMN02', 'kmn02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KMN03 = os_env('HOSTNAME_KMN03', 'kmn03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-
-
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'cookied-cicd-pike-dvr-ceph_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-            default_{{ HOSTNAME_CTL01 }}: +11
-            default_{{ HOSTNAME_CTL02 }}: +12
-            default_{{ HOSTNAME_CTL03 }}: +13
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_CID }}: +90
-            default_{{ HOSTNAME_CID01 }}: +91
-            default_{{ HOSTNAME_CID02 }}: +92
-            default_{{ HOSTNAME_CID03 }}: +93
-            default_{{ HOSTNAME_OSD01 }}: +87
-            default_{{ HOSTNAME_OSD02 }}: +88
-            default_{{ HOSTNAME_OSD03 }}: +89
-            default_{{ HOSTNAME_CMN01 }}: +96
-            default_{{ HOSTNAME_CMN02 }}: +97
-            default_{{ HOSTNAME_CMN03 }}: +98
-            default_{{ HOSTNAME_RGW01 }}: +76
-            default_{{ HOSTNAME_RGW02 }}: +77
-            default_{{ HOSTNAME_RGW03 }}: +78
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_PRX02 }}: +122
-            default_{{ HOSTNAME_MON01 }}: +71
-            default_{{ HOSTNAME_MON02 }}: +72
-            default_{{ HOSTNAME_MON03 }}: +73
-            default_{{ HOSTNAME_LOG01 }}: +61
-            default_{{ HOSTNAME_LOG02 }}: +62
-            default_{{ HOSTNAME_LOG03 }}: +63
-            default_{{ HOSTNAME_MTR01 }}: +51
-            default_{{ HOSTNAME_MTR02 }}: +52
-            default_{{ HOSTNAME_MTR03 }}: +53
-            default_{{ HOSTNAME_KMN01 }}: +45
-            default_{{ HOSTNAME_KMN02 }}: +46
-            default_{{ HOSTNAME_KMN03 }}: +47
-            default_{{ HOSTNAME_DNS01 }}: +113
-            default_{{ HOSTNAME_DNS02 }}: +114
-          ip_ranges:
-            dhcp: [+10, -10]
-
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-            default_{{ HOSTNAME_CTL01 }}: +11
-            default_{{ HOSTNAME_CTL02 }}: +12
-            default_{{ HOSTNAME_CTL03 }}: +13
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_CID }}: +90
-            default_{{ HOSTNAME_CID01 }}: +91
-            default_{{ HOSTNAME_CID02 }}: +92
-            default_{{ HOSTNAME_CID03 }}: +93
-            default_{{ HOSTNAME_OSD01 }}: +87
-            default_{{ HOSTNAME_OSD02 }}: +88
-            default_{{ HOSTNAME_OSD03 }}: +89
-            default_{{ HOSTNAME_CMN01 }}: +96
-            default_{{ HOSTNAME_CMN02 }}: +97
-            default_{{ HOSTNAME_CMN03 }}: +98
-            default_{{ HOSTNAME_RGW01 }}: +76
-            default_{{ HOSTNAME_RGW02 }}: +77
-            default_{{ HOSTNAME_RGW03 }}: +78
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_PRX02 }}: +122
-            default_{{ HOSTNAME_MON01 }}: +71
-            default_{{ HOSTNAME_MON02 }}: +72
-            default_{{ HOSTNAME_MON03 }}: +73
-            default_{{ HOSTNAME_LOG01 }}: +61
-            default_{{ HOSTNAME_LOG02 }}: +62
-            default_{{ HOSTNAME_LOG03 }}: +63
-            default_{{ HOSTNAME_MTR01 }}: +51
-            default_{{ HOSTNAME_MTR02 }}: +52
-            default_{{ HOSTNAME_MTR03 }}: +53
-            default_{{ HOSTNAME_KMN01 }}: +45
-            default_{{ HOSTNAME_KMN02 }}: +46
-            default_{{ HOSTNAME_KMN03 }}: +47
-            default_{{ HOSTNAME_DNS01 }}: +113
-            default_{{ HOSTNAME_DNS02 }}: +114
-          ip_ranges:
-            dhcp: [+70, -10]
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-            default_{{ HOSTNAME_CTL01 }}: +11
-            default_{{ HOSTNAME_CTL02 }}: +12
-            default_{{ HOSTNAME_CTL03 }}: +13
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_OSD01 }}: +87
-            default_{{ HOSTNAME_OSD02 }}: +88
-            default_{{ HOSTNAME_OSD03 }}: +89
-            default_{{ HOSTNAME_CMN01 }}: +96
-            default_{{ HOSTNAME_CMN02 }}: +97
-            default_{{ HOSTNAME_CMN03 }}: +98
-            default_{{ HOSTNAME_RGW01 }}: +76
-            default_{{ HOSTNAME_RGW02 }}: +77
-            default_{{ HOSTNAME_RGW03 }}: +78
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_PRX02 }}: +122
-            default_{{ HOSTNAME_MON01 }}: +71
-            default_{{ HOSTNAME_MON02 }}: +72
-            default_{{ HOSTNAME_MON03 }}: +73
-            default_{{ HOSTNAME_LOG01 }}: +61
-            default_{{ HOSTNAME_LOG02 }}: +62
-            default_{{ HOSTNAME_LOG03 }}: +63
-            default_{{ HOSTNAME_MTR01 }}: +51
-            default_{{ HOSTNAME_MTR02 }}: +52
-            default_{{ HOSTNAME_MTR03 }}: +53
-            default_{{ HOSTNAME_KMN01 }}: +45
-            default_{{ HOSTNAME_KMN02 }}: +46
-            default_{{ HOSTNAME_KMN03 }}: +47
-            default_{{ HOSTNAME_DNS01 }}: +113
-            default_{{ HOSTNAME_DNS02 }}: +114
-          ip_ranges:
-            dhcp: [+10, -10]
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-            default_{{ HOSTNAME_CTL01 }}: +11
-            default_{{ HOSTNAME_CTL02 }}: +12
-            default_{{ HOSTNAME_CTL03 }}: +13
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_OSD01 }}: +87
-            default_{{ HOSTNAME_OSD02 }}: +88
-            default_{{ HOSTNAME_OSD03 }}: +89
-            default_{{ HOSTNAME_CMN01 }}: +96
-            default_{{ HOSTNAME_CMN02 }}: +97
-            default_{{ HOSTNAME_CMN03 }}: +98
-            default_{{ HOSTNAME_RGW01 }}: +76
-            default_{{ HOSTNAME_RGW02 }}: +77
-            default_{{ HOSTNAME_RGW03 }}: +78
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_PRX02 }}: +122
-            default_{{ HOSTNAME_MON01 }}: +71
-            default_{{ HOSTNAME_MON02 }}: +72
-            default_{{ HOSTNAME_MON03 }}: +73
-            default_{{ HOSTNAME_LOG01 }}: +61
-            default_{{ HOSTNAME_LOG02 }}: +62
-            default_{{ HOSTNAME_LOG03 }}: +63
-            default_{{ HOSTNAME_MTR01 }}: +51
-            default_{{ HOSTNAME_MTR02 }}: +52
-            default_{{ HOSTNAME_MTR03 }}: +53
-            default_{{ HOSTNAME_KMN01 }}: +45
-            default_{{ HOSTNAME_KMN02 }}: +46
-            default_{{ HOSTNAME_KMN03 }}: +47
-            default_{{ HOSTNAME_DNS01 }}: +113
-            default_{{ HOSTNAME_DNS02 }}: +114
-          ip_ranges:
-            dhcp: [+130, +230]
-
-    groups:
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
-        network_pools:
-          admin: admin-pool01
-          private: private-pool01
-          tenant: tenant-pool01
-          external: external-pool01
-
-        l2_network_devices:
-          admin:
-            address_pool: admin-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-          private:
-            address_pool: private-pool01
-            dhcp: false
-            forward:
-              mode: route
-
-          tenant:
-            address_pool: tenant-pool01
-            dhcp: false
-
-          external:
-            address_pool: external-pool01
-            dhcp: false
-            forward:
-              mode: route
-
-        group_volumes:
-         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
-           source_image: {{ os_env('MCP_IMAGE_PATH1604') }} # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           format: qcow2
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
-           format: qcow2
-         - name: mcp_ubuntu_1604_image           # Pre-configured image for control plane
-           source_image: !os_env MCP_IMAGE_PATH1604
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
-              memory: {{ os_env('CFG_NODE_MEMORY', 12288) }}
-              boot:
-                - hd
-              volumes:
-                - name: system
-                  capacity:  {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: config
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 32768
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: &interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config: &network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 32768
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CTL03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 32768
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMN01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMN02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMN03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_OSD01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: ceph_osd
-                  capacity: 50
-                  format: qcow2
-                - name: ceph_journal
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_OSD02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: ceph_osd
-                  capacity: 50
-                  format: qcow2
-                - name: ceph_journal
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_OSD03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: ceph_osd
-                  capacity: 50
-                  format: qcow2
-                - name: ceph_journal
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_RGW01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_RGW02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_RGW03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_PRX01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: external
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - external
-
-          - name: {{ HOSTNAME_PRX02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: external
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - external
-
-          - name: {{ HOSTNAME_CMP01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: &all_interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: tenant
-                  interface_model: *interface_model
-                - label: ens6
-                  l2_network_device: external
-                  interface_model: *interface_model
-              network_config: &all_network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - tenant
-                ens6:
-                  networks:
-                    - external
-
-          - name: {{ HOSTNAME_CMP02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_GTW01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CID01 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('CID_NODE_CPU', 2) }}
-              memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CID02 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('CID_NODE_CPU', 2) }}
-              memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CID03 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('CID_NODE_CPU', 2) }}
-              memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_LOG01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_LOG02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_LOG03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MTR01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MTR02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MTR03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_KMN01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_KMN02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_KMN03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_DNS01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_DNS02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
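
Every node entry in the underlay template deleted above follows the same devops schema. The sketch below (hostname and values are illustrative, not copied from the template) shows the shape of one entry: a system disk backed by a base cloud image, plus a small raw 'iso' cdrom volume that carries the cloud-init config drive. Shared pieces are declared once as YAML anchors (&interfaces, &network_config, and &all_interfaces on the first compute node) and re-used by alias (*interfaces, *all_interfaces) on every later node, which is why most entries end with just two alias lines:

    - name: example01                    # illustrative hostname
      role: salt_minion
      params:
        vcpu: !os_env SLAVE_NODE_CPU, 1
        memory: !os_env SLAVE_NODE_MEMORY, 4096
        boot:
          - hd
        cloud_init_volume_name: iso      # boot-time metadata comes from the 'iso' volume
        cloud_init_iface_up: ens3
        volumes:
          - name: system                 # root disk, thin clone of the base image
            capacity: !os_env NODE_VOLUME_SIZE, 150
            backing_store: cloudimage1604
            format: qcow2
          - name: iso                    # cloud-init config drive, attached as a cdrom
            capacity: 1
            format: raw
            device: cdrom
            bus: ide
            cloudinit_meta_data: *cloudinit_meta_data
            cloudinit_user_data: *cloudinit_user_data_1604
        interfaces: *interfaces          # alias to an &interfaces anchor defined earlier
        network_config: *network_config
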
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml
deleted file mode 100644
index 4d395b3..0000000
--- a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml
+++ /dev/null
@@ -1,255 +0,0 @@
-default_context:
-  auditd_enabled: 'False'
-  bmk_enabled: 'False'
-  cicd_control_node01_address: 10.167.4.91
-  cicd_control_node01_hostname: cid01
-  cicd_control_node02_address: 10.167.4.92
-  cicd_control_node02_hostname: cid02
-  cicd_control_node03_address: 10.167.4.93
-  cicd_control_node03_hostname: cid03
-  cicd_control_vip_address: 10.167.4.90
-  cicd_control_vip_hostname: cid
-  cicd_enabled: 'True'
-  cluster_domain: cookied-cicd-queens-dvr-sl.local
-  cluster_name: cookied-cicd-queens-dvr-sl
-  ceph_cluster_network: 10.167.4.0/24
-  ceph_enabled: 'True'
-  ceph_hyper_converged: 'False'
-  ceph_mon_node01_address: 10.167.4.96
-  ceph_mon_node01_hostname: cmn01
-  ceph_mon_node02_address: 10.167.4.97
-  ceph_mon_node02_hostname: cmn02
-  ceph_mon_node03_address: 10.167.4.98
-  ceph_mon_node03_hostname: cmn03
-  ceph_osd_backend: bluestore
-  ceph_osd_block_db_size: '10'
-  ceph_osd_bond_mode: active-backup
-  ceph_osd_count: '3'
-  ceph_osd_data_disks: /dev/vdb
-  ceph_osd_journal_or_block_db_disks: /dev/vdc
-  ceph_osd_node_count: '3'
-  ceph_osd_journal_size: '10'
-  ceph_osd_primary_first_nic: eth1
-  ceph_osd_primary_second_nic: eth2
-  ceph_osd_rack01_backend_subnet: 10.167.4
-  ceph_osd_rack01_hostname: osd
-  ceph_osd_rack01_single_subnet: 10.167.4
-  ceph_osd_single_address_ranges: 10.167.4.37-10.167.4.39
-  ceph_osd_deploy_address_ranges: 10.167.5.37-10.167.5.39
-  ceph_osd_backend_address_ranges: 10.167.4.37-10.167.4.39
-  ceph_public_network: 10.167.4.0/24
-  ceph_rgw_address: 10.167.4.75
-  ceph_rgw_hostname: rgw
-  ceph_rgw_node01_address: 10.167.4.76
-  ceph_rgw_node01_hostname: rgw01
-  ceph_rgw_node02_address: 10.167.4.77
-  ceph_rgw_node02_hostname: rgw02
-  ceph_rgw_node03_address: 10.167.4.78
-  ceph_rgw_node03_hostname: rgw03
-  ceph_version: luminous
-  compute_bond_mode: active-backup
-  compute_primary_first_nic: eth1
-  compute_primary_second_nic: eth2
-  context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
-  control_network_netmask: 255.255.255.0
-  control_network_subnet: 10.167.4.0/24
-  control_vlan: '10'
-  cookiecutter_template_branch: ''
-  jenkins_pipelines_branch: 'release/2019.2.0'
-  cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
-  deploy_network_gateway: 10.167.5.1
-  deploy_network_netmask: 255.255.255.0
-  deploy_network_subnet: 10.167.5.0/24
-  deployment_type: physical
-  dns_server01: 172.18.176.6
-  dns_server02: 172.18.208.44
-  email_address: ddmitriev@mirantis.com
-  gateway_primary_first_nic: eth1
-  gateway_primary_second_nic: eth2
-  infra_bond_mode: active-backup
-  infra_deploy_nic: eth0
-  infra_kvm01_control_address: 10.167.4.101
-  infra_kvm01_deploy_address: 10.167.5.101
-  infra_kvm01_hostname: kvm01
-  infra_kvm02_control_address: 10.167.4.102
-  infra_kvm02_deploy_address: 10.167.5.102
-  infra_kvm02_hostname: kvm02
-  infra_kvm03_control_address: 10.167.4.103
-  infra_kvm03_deploy_address: 10.167.5.103
-  infra_kvm03_hostname: kvm03
-  infra_kvm_vip_address: 10.167.4.100
-  infra_primary_first_nic: eth1
-  infra_primary_second_nic: eth2
-  kubernetes_enabled: 'False'
-  local_repositories: 'False'
-  maas_deploy_address: 10.167.5.15
-  maas_deploy_range_end: 10.167.5.199
-  maas_deploy_range_start: 10.167.5.180
-  maas_deploy_vlan: '0'
-  maas_fabric_name: deploy-fabric0
-  maas_hostname: cfg01
-  mcp_version: proposed
-  offline_deployment: 'False'
-  opencontrail_enabled: 'False'
-  openldap_domain: ${_param:cluster_name}.local
-  openldap_enabled: 'True'
-  openldap_organisation: ${_param:cluster_name}
-  openssh_groups: cicd
-  openstack_benchmark_node01_address: 10.167.4.95
-  openstack_benchmark_node01_hostname: bmk01
-  openstack_cluster_size: compact
-  openstack_compute_count: '2'
-  openstack_compute_rack01_hostname: cmp
-  openstack_compute_rack01_single_subnet: 10.167.4
-  openstack_compute_rack01_tenant_subnet: 10.167.6
-  openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
-  openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
-  openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
-  openstack_control_address: 10.167.4.100
-  openstack_control_hostname: ctl
-  openstack_control_node01_address: 10.167.4.101
-  openstack_control_node01_hostname: ctl01
-  openstack_control_node02_address: 10.167.4.102
-  openstack_control_node02_hostname: ctl02
-  openstack_control_node03_address: 10.167.4.103
-  openstack_control_node03_hostname: ctl03
-  openstack_database_address: 10.167.4.100
-  openstack_database_hostname: ctl
-  openstack_database_node01_address: 10.167.4.101
-  openstack_database_node01_hostname: ctl01
-  openstack_database_node02_address: 10.167.4.102
-  openstack_database_node02_hostname: ctl02
-  openstack_database_node03_address: 10.167.4.103
-  openstack_database_node03_hostname: ctl03
-  openstack_enabled: 'True'
-  openstack_gateway_node01_address: 10.167.4.110
-  openstack_gateway_node01_hostname: gtw01
-  openstack_gateway_node01_tenant_address: 10.167.6.6
-  openstack_gateway_node02_address: 10.167.4.111
-  openstack_gateway_node02_hostname: gtw02
-  openstack_gateway_node02_tenant_address: 10.167.6.7
-  openstack_gateway_node03_address: 10.167.4.112
-  openstack_gateway_node03_hostname: gtw03
-  openstack_gateway_node03_tenant_address: 10.167.6.8
-  openstack_message_queue_address: 10.167.4.100
-  openstack_message_queue_hostname: ctl
-  openstack_message_queue_node01_address: 10.167.4.101
-  openstack_message_queue_node01_hostname: ctl01
-  openstack_message_queue_node02_address: 10.167.4.102
-  openstack_message_queue_node02_hostname: ctl02
-  openstack_message_queue_node03_address: 10.167.4.103
-  openstack_message_queue_node03_hostname: ctl03
-  openstack_network_engine: ovs
-  openstack_neutron_qos: 'False'
-  openstack_neutron_vlan_aware_vms: 'False'
-  openstack_nfv_dpdk_enabled: 'False'
-  openstack_nfv_sriov_enabled: 'False'
-  openstack_nova_compute_nfv_req_enabled: 'False'
-  openstack_ovs_dvr_enabled: 'True'
-  openstack_ovs_encapsulation_type: vxlan
-  openstack_proxy_address: 172.17.16.80  # external network endpoint
-  openstack_proxy_vip_interface: ens5
-  openstack_proxy_hostname: prx
-  openstack_proxy_node01_address: 10.167.4.121
-  openstack_proxy_node01_hostname: prx01
-  openstack_proxy_node02_address: 10.167.4.122
-  openstack_proxy_node02_hostname: prx02
-  openstack_upgrade_node01_address: 10.167.4.19
-  openstack_version: queens
-  oss_enabled: 'False'
-  oss_node03_address: ${_param:stacklight_monitor_node03_address}
-  oss_webhook_app_id: '24'
-  oss_pushkin_email_sender_password: password
-  oss_pushkin_smtp_port: '587'
-  oss_webhook_login_id: '13'
-  platform: openstack_enabled
-  public_host: ${_param:openstack_proxy_address}
-  publication_method: email
-  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
-  salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
-  salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
-  salt_master_address: 10.167.4.15
-  salt_master_hostname: cfg01
-  salt_master_management_address: 10.167.5.15
-  shared_reclass_branch: 'proposed'
-  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
-  fluentd_enabled: 'True'
-  stacklight_enabled: 'True'
-  stacklight_log_address: 10.167.4.60
-  stacklight_log_hostname: log
-  stacklight_log_node01_address: 10.167.4.61
-  stacklight_log_node01_hostname: log01
-  stacklight_log_node02_address: 10.167.4.62
-  stacklight_log_node02_hostname: log02
-  stacklight_log_node03_address: 10.167.4.63
-  stacklight_log_node03_hostname: log03
-  stacklight_monitor_address: 10.167.4.70
-  stacklight_monitor_hostname: mon
-  stacklight_monitor_node01_address: 10.167.4.71
-  stacklight_monitor_node01_hostname: mon01
-  stacklight_monitor_node02_address: 10.167.4.72
-  stacklight_monitor_node02_hostname: mon02
-  stacklight_monitor_node03_address: 10.167.4.73
-  stacklight_monitor_node03_hostname: mon03
-  stacklight_telemetry_address: 10.167.4.85
-  stacklight_telemetry_hostname: mtr
-  stacklight_telemetry_node01_address: 10.167.4.86
-  stacklight_telemetry_node01_hostname: mtr01
-  stacklight_telemetry_node02_address: 10.167.4.87
-  stacklight_telemetry_node02_hostname: mtr02
-  stacklight_telemetry_node03_address: 10.167.4.88
-  stacklight_telemetry_node03_hostname: mtr03
-  stacklight_version: '2'
-  stacklight_long_term_storage_type: prometheus
-  static_ips_on_deploy_network_enabled: 'False'
-  tenant_network_gateway: 10.167.6.1
-  tenant_network_netmask: 255.255.255.0
-  tenant_network_subnet: 10.167.6.0/24
-  tenant_vlan: '20'
-  upstream_proxy_enabled: 'False'
-  use_default_network_scheme: 'False'
-  openstack_octavia_enabled: 'True'
-  octavia_health_manager_node01_address: 192.168.1.10
-  octavia_health_manager_node02_address: 192.168.1.11
-  octavia_health_manager_node03_address: 192.168.1.12
-  octavia_manager_cluster: 'True'
-  octavia_hm_bind_ip: 192.168.1.12
-  octavia_lb_mgmt_cidr: 192.168.1.0/24
-  octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
-  octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
-  openstack_create_public_network: 'True'
-  openstack_public_neutron_subnet_gateway: 172.17.16.1
-  openstack_public_neutron_subnet_cidr: 172.17.16.0/24
-  openstack_public_neutron_subnet_allocation_start: 172.17.16.201
-  openstack_public_neutron_subnet_allocation_end: 172.17.16.245
-
-  manila_enabled: 'False'
-  barbican_enabled: 'True'
-  barbican_backend: dogtag
-  barbican_integration_enabled: 'False'
-
-  openstack_barbican_address: 10.167.4.44
-  openstack_barbican_hostname: kmn
-  openstack_barbican_node01_address: 10.167.4.45
-  openstack_barbican_node01_hostname: kmn01
-  openstack_barbican_node02_address: 10.167.4.46
-  openstack_barbican_node02_hostname: kmn02
-  openstack_barbican_node03_address: 10.167.4.47
-  openstack_barbican_node03_hostname: kmn03
-
-  nova_vnc_tls_enabled: 'True'
-  galera_ssl_enabled: 'True'
-  openstack_mysql_x509_enabled: 'True'
-  rabbitmq_ssl_enabled: 'True'
-  openstack_rabbitmq_x509_enabled: 'True'
-  openstack_internal_protocol: 'https'
-
-  designate_backend: powerdns
-  designate_enabled: 'True'
-  openstack_dns_node01_address: 10.167.4.113
-  openstack_dns_node02_address: 10.167.4.114
-
-  secrets_encryption_enabled: 'False'
-  cinder_backup_engine: 'ceph'
-  cinder_ceph_backup_pool_name: 'backups'
\ No newline at end of file
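
The cookiecutter context removed above is a single flat default_context mapping that cookiecutter-templates renders into the reclass cluster model. Two conventions are worth noting for anyone porting settings elsewhere: booleans and numbers are written as quoted strings, and values may reference other parameters through reclass interpolation (${_param:...}). A minimal, purely illustrative context looks like:

    default_context:
      cluster_name: example-cluster
      cluster_domain: example-cluster.local
      openstack_enabled: 'True'              # booleans are quoted strings by convention
      openstack_compute_count: '2'           # numbers too
      control_network_subnet: 10.167.4.0/24
      public_host: ${_param:openstack_proxy_address}   # reclass interpolation
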
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/environment_context.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/environment_context.yaml
deleted file mode 100644
index 9d5f841..0000000
--- a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/environment_context.yaml
+++ /dev/null
@@ -1,410 +0,0 @@
-nodes:
-    cfg01:
-      reclass_storage_name: infra_config_node01
-      roles:
-      - infra_config
-      - linux_system_codename_xenial
-      - features_runtest_cfg
-      #classes:
-      #- environment.cookied-cicd-queens-dvr-sl.override_ntp_virtual
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_static_ctl
-
-    cid01:
-      reclass_storage_name: cicd_control_node01
-      roles:
-      - cicd_control_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cid02:
-      reclass_storage_name: cicd_control_node02
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cid03:
-      reclass_storage_name: cicd_control_node03
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    ctl01:
-      reclass_storage_name: openstack_control_node01
-      roles:
-      - infra_kvm
-      - openstack_control_leader
-      - openstack_database_leader
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    ctl02:
-      reclass_storage_name: openstack_control_node02
-      roles:
-      - infra_kvm
-      - openstack_control
-      - openstack_database
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    ctl03:
-      reclass_storage_name: openstack_control_node03
-      roles:
-      - infra_kvm
-      - openstack_control
-      - openstack_database
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    prx01:
-      reclass_storage_name: openstack_proxy_node01
-      roles:
-      #- openstack_proxy  # another VIP interface used
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: single_external
-          external_address: 172.17.16.121
-          external_network_netmask: 255.255.255.0
-
-    prx02:
-      reclass_storage_name: openstack_proxy_node02
-      roles:
-      #- openstack_proxy  # another VIP interface used
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: single_external
-          external_address: 172.17.16.122
-          external_network_netmask: 255.255.255.0
-
-    kmn01:
-      reclass_storage_name: openstack_barbican_node01
-      roles:
-      - openstack_barbican
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    kmn02:
-      reclass_storage_name: openstack_barbican_node02
-      roles:
-      - openstack_barbican
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    kmn03:
-      reclass_storage_name: openstack_barbican_node03
-      roles:
-      - openstack_barbican
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mon01:
-      reclass_storage_name: stacklight_server_node01
-      roles:
-      - stacklightv2_server_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mon02:
-      reclass_storage_name: stacklight_server_node02
-      roles:
-      - stacklightv2_server
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mon03:
-      reclass_storage_name: stacklight_server_node03
-      roles:
-      - stacklightv2_server
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    log01:
-      reclass_storage_name: stacklight_log_node01
-      roles:
-      - stacklight_log_leader_v2
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    log02:
-      reclass_storage_name: stacklight_log_node02
-      roles:
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    log03:
-      reclass_storage_name: stacklight_log_node03
-      roles:
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mtr01:
-      reclass_storage_name: stacklight_telemetry_node01
-      roles:
-      - stacklight_telemetry_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mtr02:
-      reclass_storage_name: stacklight_telemetry_node02
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mtr03:
-      reclass_storage_name: stacklight_telemetry_node03
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    # Generator-based computes. For compatibility only
-    cmp<<count>>:
-      reclass_storage_name: openstack_compute_rack01
-      roles:
-      - openstack_compute
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: bond0_ab_ovs_vxlan_mesh
-        ens6:
-          role: bond1_ab_ovs_floating
-
-    gtw01:
-      reclass_storage_name: openstack_gateway_node01
-      roles:
-      - linux_system_codename_xenial
-      classes:
-      - system.linux.system.repo.mcp.apt_mirantis.docker
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: bond0_ab_ovs_vxlan_mesh
-        ens6:
-          role: bond1_ab_ovs_floating
-
-    gtw02:
-      reclass_storage_name: openstack_gateway_node02
-      roles:
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: bond0_ab_ovs_vxlan_mesh
-        ens6:
-          role: bond1_ab_ovs_floating
-
-    gtw03:
-      reclass_storage_name: openstack_gateway_node03
-      roles:
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: bond0_ab_ovs_vxlan_mesh
-        ens6:
-          role: bond1_ab_ovs_floating
-
-    osd<<count>>:
-      reclass_storage_name: ceph_osd_rack01
-      roles:
-      - ceph_osd
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cmn01:
-      reclass_storage_name: ceph_mon_node01
-      roles:
-      - ceph_mon
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cmn02:
-      reclass_storage_name: ceph_mon_node02
-      roles:
-      - ceph_mon
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    cmn03:
-      reclass_storage_name: ceph_mon_node03
-      roles:
-      - ceph_mon
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    rgw01:
-      reclass_storage_name: ceph_rgw_node01
-      roles:
-      - ceph_rgw
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    rgw02:
-      reclass_storage_name: ceph_rgw_node02
-      roles:
-      - ceph_rgw
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    rgw03:
-      reclass_storage_name: ceph_rgw_node03
-      roles:
-      - ceph_rgw
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    dns01:
-      reclass_storage_name: openstack_dns_node01
-      roles:
-      - openstack_dns
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    dns02:
-      reclass_storage_name: openstack_dns_node02
-      roles:
-      - openstack_dns
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
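
The environment context deleted above binds each VM to the generated model: reclass_storage_name selects the node definition in the cluster model, roles lists the environment roles applied to it, and each interface gets a network role (single_dhcp on the deploy network, single_ctl on the control network, bond*_ab_ovs_* for the OVS data-plane bonds). Entries named with <<count>>, such as cmp<<count>> and osd<<count>>, are presumably expanded into as many nodes as the corresponding *_count context value requests. A minimal illustrative entry:

    nodes:
        example01:                                 # illustrative node name
          reclass_storage_name: example_node01     # key in the generated cluster model
          roles:
          - linux_system_codename_xenial
          interfaces:
            ens3:
              role: single_dhcp                    # deploy network, addressed by DHCP
            ens4:
              role: single_ctl                     # control network, static address
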
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/salt.yaml
deleted file mode 100644
index bb6125c..0000000
--- a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/salt.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-{% from 'cookied-cicd-queens-dvr-sl/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-cicd-queens-dvr-sl/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-cicd-queens-dvr-sl/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
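
salt.yaml above is only a thin wrapper: it imports shared-salt.yaml (which is not part of this change) and expands three of its macros, which, judging by their names, install the Salt minions and verify the Salt version on cfg01 and on the other nodes. As a sketch of the pattern only, assuming the step schema used throughout these templates, a shared macro expands into ordinary deployment steps:

    {# hypothetical macro, for illustration; the real ones live in shared-salt.yaml #}
    {% macro MACRO_EXAMPLE_STEP() %}
    - description: Example step emitted by a shared macro
      cmd: salt '*' test.ping
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false
    {% endmacro %}

    {{ MACRO_EXAMPLE_STEP() }}
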
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
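
The leading '|' in the meta-data file above turns the whole mapping into one literal string, so devops can substitute the {hostname} placeholder per node before handing the result to cloud-init. For a node named cfg01, the rendered meta-data would presumably look like:

    instance-id: iid-local1
    hostname: cfg01
    local-hostname: cfg01
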
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml
deleted file mode 100644
index 006a798..0000000
--- a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-ssh_pwauth: True
-users:
-  - name: root
-    sudo: ALL=(ALL) NOPASSWD:ALL
-    shell: /bin/bash
-
-disable_root: false
-chpasswd:
-  list: |
-    root:r00tme
-  expire: False
-
-bootcmd:
-  # Enable root access
-  - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-  - service sshd restart
-output:
-  all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-runcmd:
-  - if lvs vg0; then pvresize /dev/vda3; fi
-  - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
-  - export TERM=linux
-  - export LANG=C
-  # Configure the DNS resolver
-  - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-  - sudo resolvconf -u
-
-  # Enable grub menu using updated config below
-  - update-grub
-
-  # Prepare network connection
-  - sudo ifup ens3
-  #- sudo route add default gw {gateway} {interface_name}
-
-  # Create swap
-  - fallocate -l 16G /swapfile
-  - chmod 600 /swapfile
-  - mkswap /swapfile
-  - swapon /swapfile
-  - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-write_files:
-  - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-    content: |
-      GRUB_RECORDFAIL_TIMEOUT=30
-      GRUB_TIMEOUT=3
-      GRUB_TIMEOUT_STYLE=menu
-
-  - path: /etc/network/interfaces
-    content: |
-      auto ens3
-      iface ens3 inet dhcp
-
-  - path: /usr/share/growlvm/image-layout.yml
-    content: |
-      root:
-        size: '30%VG'
-      home:
-        size: '1G'
-      var_log:
-        size: '11%VG'
-      var_log_audit:
-        size: '5G'
-      var_tmp:
-        size: '11%VG'
-      tmp:
-        size: '5G'
-    owner: root:root
-
-growpart:
-  mode: auto
-  devices:
-    - '/'
-    - '/dev/vda3'
-  ignore_growroot_disabled: false
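
The user-data above does three things on first boot: re-enables password-based root SSH, grows the LVM layout according to /usr/share/growlvm/image-layout.yml (when a vg0 volume group exists), and adds a 16 GB swap file, hence the '-swp' suffix in the file name. Isolating just the swap recipe as a commented sketch:

    #cloud-config
    runcmd:
      - fallocate -l 16G /swapfile      # reserve the space without writing zeros
      - chmod 600 /swapfile             # swap files must not be readable by others
      - mkswap /swapfile                # format it as swap
      - swapon /swapfile                # enable it immediately
      # persist the swap file across reboots:
      - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
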
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml
deleted file mode 100644
index 7ec644f..0000000
--- a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml
+++ /dev/null
@@ -1,1362 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-cicd-queens-dvr-sl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
-{%- macro user_data() %}{{ CLOUDINIT_USER_DATA_1604_SWP }}{% endmacro %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_1604_swp |
-{{ user_data()|indent(4, first=True) }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-queens-dvr-sl') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
-{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid') %}
-{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01') %}
-{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02') %}
-{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03') %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01') %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02') %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03') %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001') %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002') %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01') %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02') %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03') %}
-{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01') %}
-{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02') %}
-{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03') %}
-{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01') %}
-{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02') %}
-{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03') %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01') %}
-{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02') %}
-{% set HOSTNAME_GTW03 = os_env('HOSTNAME_GTW03', 'gtw03') %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01') %}
-{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02') %}
-{% set HOSTNAME_KMN01 = os_env('HOSTNAME_KMN01', 'kmn01') %}
-{% set HOSTNAME_KMN02 = os_env('HOSTNAME_KMN02', 'kmn02') %}
-{% set HOSTNAME_KMN03 = os_env('HOSTNAME_KMN03', 'kmn03') %}
-{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01') %}
-{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02') %}
-{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03') %}
-{% set HOSTNAME_RGW01 = os_env('HOSTNAME_RGW01', 'rgw01') %}
-{% set HOSTNAME_RGW02 = os_env('HOSTNAME_RGW02', 'rgw02') %}
-{% set HOSTNAME_RGW03 = os_env('HOSTNAME_RGW03', 'rgw03') %}
-{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd001') %}
-{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd002') %}
-{% set HOSTNAME_OSD03 = os_env('HOSTNAME_OSD03', 'osd003') %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01') %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02') %}
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'cookied-cicd-queens-dvr-sl_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-            default_{{ HOSTNAME_CID }}: +90
-            default_{{ HOSTNAME_CID01 }}: +91
-            default_{{ HOSTNAME_CID02 }}: +92
-            default_{{ HOSTNAME_CID03 }}: +93
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_KMN01 }}: +45
-            default_{{ HOSTNAME_KMN02 }}: +46
-            default_{{ HOSTNAME_KMN03 }}: +47
-            default_{{ HOSTNAME_MON01 }}: +71
-            default_{{ HOSTNAME_MON02 }}: +72
-            default_{{ HOSTNAME_MON03 }}: +73
-            default_{{ HOSTNAME_LOG01 }}: +61
-            default_{{ HOSTNAME_LOG02 }}: +62
-            default_{{ HOSTNAME_LOG03 }}: +63
-            default_{{ HOSTNAME_MTR01 }}: +86
-            default_{{ HOSTNAME_MTR02 }}: +87
-            default_{{ HOSTNAME_MTR03 }}: +88
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_GTW02 }}: +111
-            default_{{ HOSTNAME_GTW03 }}: +112
-            default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_PRX02 }}: +122
-            default_{{ HOSTNAME_OSD01 }}: +37
-            default_{{ HOSTNAME_OSD02 }}: +38
-            default_{{ HOSTNAME_OSD03 }}: +39
-            default_{{ HOSTNAME_CMN01 }}: +96
-            default_{{ HOSTNAME_CMN02 }}: +97
-            default_{{ HOSTNAME_CMN03 }}: +98
-            default_{{ HOSTNAME_RGW01 }}: +76
-            default_{{ HOSTNAME_RGW02 }}: +77
-            default_{{ HOSTNAME_RGW03 }}: +78
-            default_{{ HOSTNAME_DNS01 }}: +113
-            default_{{ HOSTNAME_DNS02 }}: +114
-          ip_ranges:
-            dhcp: [+90, -10]
-
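
A note on the address-pool notation used throughout the rest of this underlay: net: '10.60.0.0/16:24' asks devops to carve a /24 subnet out of the 10.60.0.0/16 pool, and every ip_reserved entry is an offset into that subnet. Assuming 10.60.0.0/24 is the subnet actually allocated, the entries above would presumably resolve as:

    # gateway: +1            -> 10.60.0.1
    # default_cfg01: +15     -> 10.60.0.15
    # default_ctl01: +101    -> 10.60.0.101
    # dhcp: [+90, -10]       -> roughly 10.60.0.90 .. 10.60.0.245
    #                           (negative offsets count back from the end of the subnet)
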
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-            default_{{ HOSTNAME_CID }}: +90
-            default_{{ HOSTNAME_CID01 }}: +91
-            default_{{ HOSTNAME_CID02 }}: +92
-            default_{{ HOSTNAME_CID03 }}: +93
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_KMN01 }}: +45
-            default_{{ HOSTNAME_KMN02 }}: +46
-            default_{{ HOSTNAME_KMN03 }}: +47
-            default_{{ HOSTNAME_MON01 }}: +71
-            default_{{ HOSTNAME_MON02 }}: +72
-            default_{{ HOSTNAME_MON03 }}: +73
-            default_{{ HOSTNAME_LOG01 }}: +61
-            default_{{ HOSTNAME_LOG02 }}: +62
-            default_{{ HOSTNAME_LOG03 }}: +63
-            default_{{ HOSTNAME_MTR01 }}: +86
-            default_{{ HOSTNAME_MTR02 }}: +87
-            default_{{ HOSTNAME_MTR03 }}: +88
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_GTW02 }}: +111
-            default_{{ HOSTNAME_GTW03 }}: +112
-            default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_PRX02 }}: +122
-            default_{{ HOSTNAME_OSD01 }}: +37
-            default_{{ HOSTNAME_OSD02 }}: +38
-            default_{{ HOSTNAME_OSD03 }}: +39
-            default_{{ HOSTNAME_CMN01 }}: +96
-            default_{{ HOSTNAME_CMN02 }}: +97
-            default_{{ HOSTNAME_CMN03 }}: +98
-            default_{{ HOSTNAME_RGW01 }}: +76
-            default_{{ HOSTNAME_RGW02 }}: +77
-            default_{{ HOSTNAME_RGW03 }}: +78
-            default_{{ HOSTNAME_DNS01 }}: +113
-            default_{{ HOSTNAME_DNS02 }}: +114
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_KMN01 }}: +45
-            default_{{ HOSTNAME_KMN02 }}: +46
-            default_{{ HOSTNAME_KMN03 }}: +47
-            default_{{ HOSTNAME_MON01 }}: +71
-            default_{{ HOSTNAME_MON02 }}: +72
-            default_{{ HOSTNAME_MON03 }}: +73
-            default_{{ HOSTNAME_LOG01 }}: +61
-            default_{{ HOSTNAME_LOG02 }}: +62
-            default_{{ HOSTNAME_LOG03 }}: +63
-            default_{{ HOSTNAME_MTR01 }}: +86
-            default_{{ HOSTNAME_MTR02 }}: +87
-            default_{{ HOSTNAME_MTR03 }}: +88
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_GTW02 }}: +111
-            default_{{ HOSTNAME_GTW03 }}: +112
-            default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_PRX02 }}: +122
-            default_{{ HOSTNAME_OSD01 }}: +37
-            default_{{ HOSTNAME_OSD02 }}: +38
-            default_{{ HOSTNAME_OSD03 }}: +39
-            default_{{ HOSTNAME_CMN01 }}: +96
-            default_{{ HOSTNAME_CMN02 }}: +97
-            default_{{ HOSTNAME_CMN03 }}: +98
-            default_{{ HOSTNAME_RGW01 }}: +76
-            default_{{ HOSTNAME_RGW02 }}: +77
-            default_{{ HOSTNAME_RGW03 }}: +78
-            default_{{ HOSTNAME_DNS01 }}: +113
-            default_{{ HOSTNAME_DNS02 }}: +114
-          ip_ranges:
-            dhcp: [+10, -10]
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_CMP01 }}: +105
-            default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_KMN01 }}: +45
-            default_{{ HOSTNAME_KMN02 }}: +46
-            default_{{ HOSTNAME_KMN03 }}: +47
-            default_{{ HOSTNAME_MON01 }}: +71
-            default_{{ HOSTNAME_MON02 }}: +72
-            default_{{ HOSTNAME_MON03 }}: +73
-            default_{{ HOSTNAME_LOG01 }}: +61
-            default_{{ HOSTNAME_LOG02 }}: +62
-            default_{{ HOSTNAME_LOG03 }}: +63
-            default_{{ HOSTNAME_MTR01 }}: +86
-            default_{{ HOSTNAME_MTR02 }}: +87
-            default_{{ HOSTNAME_MTR03 }}: +88
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_GTW02 }}: +111
-            default_{{ HOSTNAME_GTW03 }}: +112
-            default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_PRX02 }}: +122
-            default_{{ HOSTNAME_OSD01 }}: +37
-            default_{{ HOSTNAME_OSD02 }}: +38
-            default_{{ HOSTNAME_OSD03 }}: +39
-            default_{{ HOSTNAME_CMN01 }}: +96
-            default_{{ HOSTNAME_CMN02 }}: +97
-            default_{{ HOSTNAME_CMN03 }}: +98
-            default_{{ HOSTNAME_RGW01 }}: +76
-            default_{{ HOSTNAME_RGW02 }}: +77
-            default_{{ HOSTNAME_RGW03 }}: +78
-            default_{{ HOSTNAME_DNS01 }}: +113
-            default_{{ HOSTNAME_DNS02 }}: +114
-          ip_ranges:
-            dhcp: [+180, +220]
-
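The ip_reserved entries above use relative offsets: '+N' appears to count forward from the pool's network address and '-N' backward from its broadcast address, while a pool string such as '10.60.0.0/16:24' seems to ask fuel-devops to carve a /24 out of the /16. A minimal standard-library sketch of that resolution follows; the pool value and helper name are illustrative, not devops API:

    import ipaddress

    # Assume devops allocated this /24 out of 10.60.0.0/16.
    pool = ipaddress.ip_network('10.60.0.0/24')

    def resolve(offset: int) -> ipaddress.IPv4Address:
        """Resolve a '+N' / '-N' offset against the pool boundaries."""
        if offset >= 0:
            return pool.network_address + offset    # '+N': from the start
        return pool.broadcast_address + offset      # '-N': back from the end

    print(resolve(1))     # gateway / l2_network_device -> 10.60.0.1
    print(resolve(101))   # default_ctl01 -> 10.60.0.101
    print(resolve(-10))   # upper bound of dhcp [+90, -10] -> 10.60.0.245

Under that reading, the dhcp range [+90, -10] in private-pool01 spans 10.60.0.90 through 10.60.0.245.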
-    groups:
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
-        network_pools:
-          admin: admin-pool01
-          private: private-pool01
-          tenant: tenant-pool01
-          external: external-pool01
-
-        l2_network_devices:
-          private:
-            address_pool: private-pool01
-            dhcp: false
-            forward:
-              mode: route
-
-          admin:
-            address_pool: admin-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-          tenant:
-            address_pool: tenant-pool01
-            dhcp: false
-
-          external:
-            address_pool: external-pool01
-            dhcp: false
-            forward:
-              mode: route
-
-
-        group_volumes:
-         - name: cloudimage1604    # This name is used for the 'backing_store' option of node volumes.
-           source_image: {{ os_env('MCP_IMAGE_PATH1604') }}  # http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2
-           format: qcow2
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
-           format: qcow2
-         - name: mcp_ubuntu_1604_image           # Pre-configured image for control plane
-           source_image: !os_env MCP_IMAGE_PATH1604
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
-              memory: {{ os_env('CFG_NODE_MEMORY', 12288) }}
-              boot:
-                - hd
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: config
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
-                                                            # it will be uploaded after config drive generation
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 32768
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: &interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config: &network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 32768
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CTL03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 32768
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_LOG01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_LOG02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_LOG03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MTR01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MTR02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MTR03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_PRX01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: external
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - external
-
-          - name: {{ HOSTNAME_PRX02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: external
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - external
-
-          - name: {{ HOSTNAME_CMP01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-
-              interfaces: &all_interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: tenant
-                  interface_model: *interface_model
-                - label: ens6
-                  l2_network_device: external
-                  interface_model: *interface_model
-              network_config: &all_network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - tenant
-                ens6:
-                  networks:
-                    - external
-
-          - name: {{ HOSTNAME_CMP02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_GTW01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_GTW02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_GTW03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_CID01 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('CID_NODE_CPU', 2) }}
-              memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CID02 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('CID_NODE_CPU', 2) }}
-              memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CID03 }}
-            role: salt_minion
-            params:
-              vcpu: {{ os_env('CID_NODE_CPU', 2) }}
-              memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_KMN01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_KMN02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_KMN03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMN01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMN02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CMN03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_OSD01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: ceph_osd
-                  capacity: 50
-                  format: qcow2
-                - name: ceph_journal
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_OSD02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: ceph_osd
-                  capacity: 50
-                  format: qcow2
-                - name: ceph_journal
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_OSD03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: ceph_osd
-                  capacity: 50
-                  format: qcow2
-                - name: ceph_journal
-                  capacity: 50
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_RGW01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_RGW02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_RGW03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_DNS01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_DNS02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for storing the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
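Nearly every node entry above is deduplicated with plain YAML anchors and aliases: interfaces/network_config are defined once ('&interfaces' and '&network_config' on ctl01, '&all_interfaces' and '&all_network_config' on cmp01) and reused verbatim through the matching '*' aliases. A self-contained demonstration of the mechanism, assuming PyYAML is installed; the real template additionally uses custom tags such as '!os_env' that need a constructor registered by the harness, which this sketch omits:

    import textwrap
    import yaml

    doc = textwrap.dedent("""
        nodes:
          - name: ctl01
            interfaces: &interfaces
              - {label: ens3, l2_network_device: admin}
              - {label: ens4, l2_network_device: private}
          - name: ctl02
            interfaces: *interfaces   # alias: reuses the anchored list verbatim
    """)

    data = yaml.safe_load(doc)
    assert data['nodes'][0]['interfaces'] == data['nodes'][1]['interfaces']
    print(data['nodes'][1]['interfaces'][0]['label'])  # -> ens3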
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-k8s-contrail40-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-k8s-contrail40-maas.yaml
deleted file mode 100644
index 0943346..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-k8s-contrail40-maas.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-cicd-bm-k8s-contrail40-maas' %}
-# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-cicd-bm-k8s-contrail40-maas') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-k8s-contrail.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml','salt-context-cookiecutter-k8s-contrail.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2410') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2411') %}
-
-{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
-{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: Temporary workaround for correct bridge names according to the environment templates
-  cmd: |
-    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
-
-- description: Defining username and password params for IPMI access
-  cmd: |
-    sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
-    sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: "Add user/password for IPMI access"
-  cmd: |
-    set -e;
-    set -x;
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
-    reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
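The IPMI wiring above is a two-step rewrite: the sed calls turn the literal ==IPMI_USER== / ==IPMI_PASS== placeholders in maas_machines.yml into reclass interpolation strings, and reclass-tools then stores the real credentials under parameters._param. A rough Python equivalent of the sed step, illustrative only (the template itself runs sed on the salt master):

    import re

    text = "power_user: ==IPMI_USER==\npower_password: ==IPMI_PASS==\n"
    # Same substitutions as the sed one-liners above.
    text = re.sub(r'==IPMI_USER==', '${_param:power_user}', text)
    text = re.sub(r'==IPMI_PASS==', '${_param:power_password}', text)
    print(text)
    # power_user: ${_param:power_user}
    # power_password: ${_param:power_password}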
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail40-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail40-maas.yaml
deleted file mode 100644
index 951d6fa..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-os-contrail40-maas.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-cicd-bm-os-contrail40-maas' %}
-# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-cicd-bm-os-contrail40-maas') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml', 'salt-context-cookiecutter-contrail.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
-
-{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
-{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: Temporary workaround for correct bridge names according to the environment templates
-  cmd: |
-    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
-
-- description: Defining username and password params for IPMI access
-  cmd: |
-    sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
-    sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: "Add user/password for IPMI access"
-  cmd: |
-    set -e;
-    set -x;
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
-    reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-queens-contrail-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-queens-contrail-maas.yaml
deleted file mode 100644
index 0ff7c82..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-bm-queens-contrail-maas.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-cicd-bm-queens-contrail-maas' %}
-# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-cicd-bm-queens-contrail-maas') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml', 'salt-context-cookiecutter-contrail.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
-
-{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
-{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: Temporary workaround for the cinder backend defined by default in reclass.system
-  cmd: |
-    sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Temporary workaround for correct bridge names according to the environment templates
-  cmd: |
-    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
-
-- description: Defining username and password params for IPMI access
-  cmd: |
-    sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
-    sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: "Add user/password for IPMI access"
-  cmd: |
-    set -e;
-    set -x;
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
-    reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Temporary workaround, to be fixed or debugged
-  cmd: |
-    sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
-    sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
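The closing pg_num/pgp_num step drops every Ceph pool from 128 placement groups to 4, which suits the handful of virtual OSDs these labs run; it is marked skip_fail: true because not every generated model ships a ceph/setup.yml. A sketch of the resulting fragment, with a hypothetical pool name (only the pg_num/pgp_num values come from the sed commands above):

    parameters:
      ceph:
        setup:
          pool:
            volumes:        # hypothetical pool name
              pg_num: 4     # was 128
              pgp_num: 4    # was 128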
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
deleted file mode 100644
index 5da87d1..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-cicd-k8s-calico-sl' %}
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-k8s-sl.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context-k8s-sl.yaml', 'cookiecutter-context-k8s-sl.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_UPLOAD_AND_IMPORT_GPG_ENCRYPTION_KEY() }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
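Each generator template resolves its knobs through os_env(name, default), so a CI job can re-point the same template without editing it. For example, exporting a hypothetical inventory name before rendering:

    export ENVIRONMENT_MODEL_INVENTORY_NAME=my-custom-inventory

would make the template above render the Environment model from my-custom-inventory.yaml instead of the default cookied-cicd-k8s-calico-sl.yaml.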
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
deleted file mode 100644
index 4233f9c..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-cicd-k8s-genie' %}
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-k8s-genie.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context-k8s-genie.yaml', 'cookiecutter-context-k8s-genie.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-system.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-system.yaml
deleted file mode 100644
index 4b0c57d..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-system.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-cicd-k8s-system' %}
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-k8s-system.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context-k8s-system.yaml', 'cookiecutter-context-k8s-system.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-ovs-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-ovs-maas.yaml
deleted file mode 100644
index 4d554c4..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-ovs-maas.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-# See shared-salt.yaml for other salt model repository parameters
-{% set LAB_CONFIG_NAME = 'cookied-cicd-ovs-maas' %}
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-cicd-ovs-maas') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-environment.yaml','salt-context-cookiecutter-openstack_ovs.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2404') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2406') %}
-
-{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
-{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
-
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-
-- description: Temporary workaround to correct bridge names according to the environment templates
-  cmd: |
-    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
-    salt '*' saltutil.refresh_pillar;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
-
-- description: Defining username and password params for IPMI access
-  cmd: |
-    sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
-    sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: "Add user/password for IPMI access"
-  cmd: |
-    set -e;
-    set -x;
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
-    reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
\ No newline at end of file
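The bridge-rename workaround above exists because the cookiecutter output names the KVM bridges with hyphens (br-ctl, br-mgm) while the environment templates expect underscores; the trailing saltutil.refresh_pillar makes all minions pick up the edited model. The effect on infra/kvm.yml is a plain textual rename (the parameter carrying the name is assumed):

    # hypothetical kvm.yml line, before and after the sed pass:
    #   control_bridge: br-ctl   ->   control_bridge: br_ctl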
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-ceph.yaml
deleted file mode 100644
index 4b2b12f..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-ceph.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-cicd-pike-dvr-ceph' %}
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-dvr-ceph.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment_context.yaml', 'cookiecutter-context-dvr-ceph.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
-  cmd: |
-    set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Temporary workaround, to be fixed or debugged
-  cmd: |
-    sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
-    sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
-
-
-
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml
deleted file mode 100644
index 3264b5c..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-cicd-queens-dvr-sl' %}
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-queens-dvr-sl.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment_context.yaml', 'cookiecutter-context-queens-dvr-sl.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS_FROM_UPDATE() }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
-  cmd: |
-    set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
-    # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-    # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-    # Workaround of missing reclass.system for dns role
-    # salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Temporary workaround, to be fixed or debugged
-  cmd: |
-    sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
-    sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
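A verification step one could append after the combined-roles cleanup, not part of the original template; the grep pattern is illustrative and the step fails if any stale infra_kvm node reference survives:

    - description: Verify combined-roles cleanup left no stale node references
      cmd: |
        ! grep 'infra_kvm_node0' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false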
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-small-mcp-ironic.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-small-mcp-ironic.yaml
deleted file mode 100644
index c32d229..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-small-mcp-ironic.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-small-mcp-ironic' %}
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-small-mcp-ironic.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context.yaml', 'cookiecutter-context-small-mcp-ironic.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
-  cmd: |
-    set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-#- description: "Workaround for using glusterfs on single node"
-#  cmd: |
-#    set -e;
-#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/replica: .*//g' {} +
-#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/.*cluster_node02_address.*//g' {} +
-#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/.*cluster_node03_address.*//g' {} +
-#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/opts: .*/opts: \"defaults,backup-volfile-servers=${_param:glusterfs_node01_address}\"/g' {} +
-#
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 5}
-#  skip_fail: false
-
-#- description: "Workaround for changing services to single mode"
-#  cmd: |
-#    set -e;
-#    sed -i 's/- system.keystone.server.cluster/- system.keystone.server.single/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
-#    sed -i 's/- system.rabbitmq.server.cluster/- system.rabbitmq.server.single/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/message_queue.yml;
-#
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 5}
-#  skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-small-mcp-ironic/cookiecutter-context-small-mcp-ironic.yaml b/tcp_tests/templates/cookied-small-mcp-ironic/cookiecutter-context-small-mcp-ironic.yaml
deleted file mode 100644
index 106b0a9..0000000
--- a/tcp_tests/templates/cookied-small-mcp-ironic/cookiecutter-context-small-mcp-ironic.yaml
+++ /dev/null
@@ -1,324 +0,0 @@
-default_context:
-  alertmanager_notification_email_enabled: 'False'
-  auditd_enabled: 'False'
-  backend_network_netmask: ''
-  backup_private_key: '-----BEGIN RSA PRIVATE KEY-----
-
-    MIIEpAIBAAKCAQEA0YjCIcsHq6Jmai5de19wOERdbMMP1kvaHAK2j5tCiywJrmsN
-
-    Y+nzGzFKqUiuW/HlvHCY6UNdlDyz6H7WdhzmRoFgVIzWGWR1rJk70D2JdN+QrKhA
-
-    BqAhJ/zIOn0btx3y30VaIHAR6V9P6sezYLtaQAP9LLuHV+9tJH0dY3sisYDd9PdS
-
-    Hn2o0CYpe2Ojt1vaKhhIbtzxlaXM4XelxVbaahlIyzW6ooXYL5zwccG+g11T0R9z
-
-    6YNym+pI1arFjOTobeEiCkDUX+oFd/ICm3ntt6LP69gZKcGSOFB/8n17nBZfmqQf
-
-    puSwZKqwp6pca4VaT2uJx0jW9mBbMgyhoyKuPwIDAQABAoIBAQCAxfCxPgrfw7X3
-
-    ablP+i6IlhNopVTVWdaYwl4aUcwKpizpAafAEOa0fo2pDh8CKN8M+ZIwQZOAZ/IV
-
-    X+ZDvq0TBa4DKG7oOiJLyfzFlRwmNMPAKML4j27xGVyg/YSL/J7n8sJaDyYi6828
-
-    t7CZMWtczlbJKBMcyuujsjTej62ZskAz9S9LC3A8ppLYe2/8WUZueLehXVLfy3rO
-
-    c/7LU+zQ9kcP/nEuFgMYzcLxI8bJligI5JdtXL5baK3DX/9UsWzDouHePCCYo07k
-
-    xInodc9WCHKeAriV6qCctOm6TIhB30hDNQl+rnF2c+Ead5hyP1UneUW+8D8RSxe7
-
-    CT27o3IpAoGBAP8e4N+dbUxAAlRx+20Dgad7/g/zBb+HJyUIsmpxF5RebJNLp03o
-
-    8bOS1we/lS1HIQFolveR/pcoWowJUpDkPJLXC8Wnfjs5NvKRsqV5OLp+m9DynQ1y
-
-    xZmTfHJV4aluZvUd5Azw0lOdAgNu97fLS4IYS6hRtuEncSwWbDHIN9GlAoGBANJB
-
-    p2Z4h49XJ0YigUw7S/VyuU9vSA6nLQwehBMyAl6hmvnCg3DB8sNuOQYZcqr+aUyc
-
-    gicyiEwvwW8Qbm837eqv/8CJSkfQq/8JVg6F4vRweNI5eYv2N/ZInmSn1opYzqEd
-
-    J4TFalwwgUqbLer+dhCjfcn6mdkRyrnW1GepvXMTAoGBAJcJIdpg6gcdUgPKYy5b
-
-    yBNuna+1kW6dRfhv2KiZgXsuF5twS4EdapKEWVdV/awLkyexUscIoK++jTSghEgR
-
-    RycrtuVyTpIjQjuDND8wr/wA3qBqMb53dzJ/lUpfO7TCDqQI6S0cGXi02C9OL8uY
-
-    yIAhgBELJ3jOj/qo367tONadAoGAKz8l14XjHDCo+1wNjEiGDy5Rv2z8PUdVlLCS
-
-    KgCXTC+hWM4RixfZfykkwYRqeZFqxz9J5hYWwtTvzJBspqOyZBtfV5LlnG/ncMXS
-
-    1ZnkXLLlpxf7UDaMvDOjoMCE+F/b4HfGsSCKB/xSG65fe35renCmZu0MyAFI2YC+
-
-    n7PiK9ECgYAZ/9KBk0FQmOpc13+oqHyMPOUHPnfFkmVVJgaJQkLWst3x6+Mx0uQW
-
-    KFddR3UNh8V+oOP/WujT85WtueM2E3M4/C+koeVlaDFh8g4qglnpSuT4CTqLpmPb
-
-    KYWKD0IElw7/4ny4VTTMe6KbnDV0A154tFNvsTX6ELvy4V8OFuPfnQ==
-
-    -----END RSA PRIVATE KEY-----'
-  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDRiMIhyweromZqLl17X3A4RF1sww/WS9ocAraPm0KLLAmuaw1j6fMbMUqpSK5b8eW8cJjpQ12UPLPoftZ2HOZGgWBUjNYZZHWsmTvQPYl035CsqEAGoCEn/Mg6fRu3HfLfRVogcBHpX0/qx7Ngu1pAA/0su4dX720kfR1jeyKxgN3091IefajQJil7Y6O3W9oqGEhu3PGVpczhd6XFVtpqGUjLNbqihdgvnPBxwb6DXVPRH3Ppg3Kb6kjVqsWM5Oht4SIKQNRf6gV38gKbee23os/r2BkpwZI4UH/yfXucFl+apB+m5LBkqrCnqlxrhVpPa4nHSNb2YFsyDKGjIq4/
-  barbican_backend: dogtag
-  barbican_enabled: 'False'
-  bmk_enabled: 'False'
-  ceph_enabled: 'False'
-  cfg_failsafe_ssh_public_key: '1'
-  cicd_enabled: 'False'
-  cicd_private_key: '-----BEGIN RSA PRIVATE KEY-----
-
-    MIIEpAIBAAKCAQEA0EnXqmZqVb+/ebPURO9wb3A8ntUfQmQOC1WNJv/hU7XathNA
-
-    kDmDSMCn9a7m7WbuANpVhZkmstebNgHFoCtGSL4JJYRrBB5QKN0QrpFKiGaetCE2
-
-    eGDAmvvFx9hgbQUwtuJvAhUg4sSq6WY0yblcID3fLX4YKuDVvS8fisB6i1xrQ/tQ
-
-    qEF8S5rsoXUigYnZZwpnkRgSuRtECY8OFSXH4sv+JPEVhrx5GD7CtwVuYliIg1mw
-
-    nF7J5X9wPNmNnm8g4Nz07zpA2FMoF5+QuPZrV7t3Xm2hcSmMZbsGG4zy2pqbwDvR
-
-    5FgQLfaPBYcqrrIr/kCnCajDzpZdBxIjdXPK9QIDAQABAoIBAQCEiVhIP58OLVND
-
-    t+AsdUfQQVTxpJ+deVlOcQO+ezgAMkmXbiy5GT+mvaCivsaG7eYoeMXOmyN6zaMf
-
-    /ISqZJ72jqX3T4lhN4C+X9zLl/wbS2FVMYUdmEM221qAzfidpp3W4cLNSnCAm7A9
-
-    kCGq8t3iTjyDECeEsUiQdznU6qGPpvqRC9E2wlatbUowYT8VSbtc9aDGkZNMnZAP
-
-    ypBzGQOhIIIs3V3m0XqF5dsqxq+IjZmBjkJ8TBEyRre+Hu25r4ksQk42Qw8Lj1yI
-
-    W/+XTJiI04XLbCubeknQuTy3baku1i58gEVuJcYdeC3pCF4nu1PvBRxgVE1TU1xq
-
-    mIL2rBrJAoGBAPjSOvM/XmBfwW3znJ7xknDoLFq2yoI5bAr0ehr0/VLbplAybY1b
-
-    6mWcpiWcNPnCwAUXUjI8FRklQwMrCNvdXBlixZM5Au1Bsg1JjuYrQScc8dRFDWH5
-
-    8YDFxrR9ijFkYmhTHOMfm2vk5BxaOgIvAwv5XN43Li1nKAjlwU3euPZTAoGBANZM
-
-    PbSHJ3Y8llOWNwFqqYC9VGVAC5K+kKKmJmUKBluZpHyYYwdGqF+ItM8LzLilW/jj
-
-    CZF9KU4lwovbTHDsvOvYPJFO+nVfXYpiqFG95X5w9L1qnfLhWNfbNjp1JgzOadGb
-
-    RIPKktjjJEde9ShNPRfHWRzgxAvj+57moq3v64CXAoGAAlyMbq6VSLjf7xk2zVNh
-
-    nmy1rw65EU9WNSxo1ESq+tRW2cAAxiyvJtO7x/OZlR1CEUpNX2iukpSF9Eu+Q8fw
-
-    DdWgJmuOGY5cEEA4ePrEHYjqyqb1H47tudkmr6PZYeqf/Hl9drJgGUAM4jABCPBF
-
-    SSHOvdUsPQYTnTIBCaopez0CgYEAsj9YVADXYhGjOIOJ3TPLKbpRqKZM+hJoW+G3
-
-    rfNYtnhlyP034aV0B4K+Yjl+X1er2KmAG/Pvl4DxAUO3oXZI7iM+vd7jlR1twN2v
-
-    A87gRPvPln5IQu4N5/3+bUFkFOLcQezUYIru+lh0pKjnYk5cspquFMcgSoOnl9Rf
-
-    HC6jxKECgYBxh0hH+vmudh36zuhqghZSI+DLd6WHpzpnnQeSkI2sHPB7EGTZ8+Ql
-
-    Rykt8XGiZvkuc9geH5Sc6aIKJr7WWTxgwmhj7T6iBHLFLyGfcAvUAGcLMOnjNFcL
-
-    qEaNVOfzXB9ZBN1h8wRbxoKx+o2c78agrQyetEyiz7wkYFQKj8xq4Q==
-
-    -----END RSA PRIVATE KEY-----'
-  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQSdeqZmpVv795s9RE73BvcDye1R9CZA4LVY0m/+FTtdq2E0CQOYNIwKf1rubtZu4A2lWFmSay15s2AcWgK0ZIvgklhGsEHlAo3RCukUqIZp60ITZ4YMCa+8XH2GBtBTC24m8CFSDixKrpZjTJuVwgPd8tfhgq4NW9Lx+KwHqLXGtD+1CoQXxLmuyhdSKBidlnCmeRGBK5G0QJjw4VJcfiy/4k8RWGvHkYPsK3BW5iWIiDWbCcXsnlf3A82Y2ebyDg3PTvOkDYUygXn5C49mtXu3debaFxKYxluwYbjPLampvAO9HkWBAt9o8Fhyqusiv+QKcJqMPOll0HEiN1c8r1
-  cluster_domain: small_cloud.local
-  cluster_name: small_cloud
-  compute_bond_mode: active-backup
-  compute_padding_with_zeros: 'True'
-  compute_primary_first_nic: eth1
-  compute_primary_second_nic: eth2
-  context_seed: 88zA67wwzN74hI8Vzpy7CCEDXPGfKGUv37965C5bKeIZM8436V73PhAT9yaLDUYV0xj8zpidxbmh0FMN83dzNWAA
-  control_network_netmask: 255.255.255.0
-  control_network_subnet: ==IPV4_NET_CONTROL_PREFIX==.0/24
-  control_vlan: '10'
-  cookiecutter_template_branch: ''
-  cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: ssh://gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
-  deploy_network_gateway: ==IPV4_NET_ADMIN_PREFIX==.1
-  deploy_network_netmask: 255.255.255.0
-  deploy_network_subnet: ==IPV4_NET_ADMIN_PREFIX==.0/24
-  deployment_type: physical
-  designate_backend: powerdns
-  designate_enabled: 'False'
-  openstack_create_public_network: 'True'
-  openstack_public_neutron_subnet_gateway: ==IPV4_NET_EXTERNAL_PREFIX==.129
-  openstack_public_neutron_subnet_cidr: ==IPV4_NET_EXTERNAL_PREFIX==.128/26
-  openstack_public_neutron_subnet_allocation_start: ==IPV4_NET_EXTERNAL_PREFIX==.170
-  openstack_public_neutron_subnet_allocation_end: ==IPV4_NET_EXTERNAL_PREFIX==.190
-  dns_server01: 172.18.176.6
-  dns_server02: 172.18.208.44
-  email_address: sgudz@mirantis.com
-  gainsight_service_enabled: 'False'
-  galera_ssl_enabled: 'False'
-  gateway_primary_first_nic: eth1
-  gateway_primary_second_nic: eth2
-  gnocchi_aggregation_storage: file
-  infra_bond_mode: active-backup
-  infra_deploy_nic: eth0
-  infra_kvm01_control_address: ==IPV4_NET_CONTROL_PREFIX==.11
-  infra_kvm01_hostname: kvm01
-  infra_kvm02_control_address: ==IPV4_NET_CONTROL_PREFIX==.12
-  infra_kvm02_hostname: kvm02
-  infra_kvm03_control_address: ==IPV4_NET_CONTROL_PREFIX==.13
-  infra_kvm03_hostname: kvm03
-  infra_kvm_vip_address: ==IPV4_NET_CONTROL_PREFIX==.10
-  infra_primary_first_nic: eth1
-  infra_primary_second_nic: eth2
-  internal_proxy_enabled: 'False'
-  ironic_enabled: 'True'
-  kubernetes_ctl_on_kvm: 'False'
-  kubernetes_enabled: 'False'
-  local_repositories: 'False'
-  maas_enabled: 'False'
-  manila_enabled: 'False'
-  manila_lvm_devices: /dev/sdb,/dev/sdc
-  manila_lvm_volume_name: manila-volume
-  manila_share_backend: lvm
-  mcp_common_scripts_branch: ''
-  mcp_version: 2019.2.0
-  motd_company_name: MirantisTestDeployment
-  no_platform: 'False'
-  nova_vnc_tls_enabled: 'False'
-  octavia_private_key: '-----BEGIN RSA PRIVATE KEY-----
-
-    MIIEpAIBAAKCAQEAuiHjkWRkhIWdhiRPqdE/rYIbcYsMkDSIZrlu0yzicGBxyNOH
-
-    qwzW48zQr5y/q4HaaPC5HB4LGylnEnyM4vSII+kiT9E8T1sr/XNi9GqYonPVEDUu
-
-    aE8EmZ1RnVppTqpkbGpRnF6GmBtGSdthM3pYt97/UeaviFJye3G3tz47pYwXND4j
-
-    6maElcSoUEntpHkJ5esBy+G3yr2Y9mF9EOV6ZNxN1jIc2ufxFTQruhqxx8ug1EWF
-
-    9JlEByfML6gwHq3FgSz2MHWX+to+LRJALv5KY4UAworAPzafY4/557c6ggqvvakk
-
-    wqju59z5QWqBV8Vu+30VTdbQd8xsnYlPdAUziwIDAQABAoIBAAkF5YtBy6Gk/Irp
-
-    Lbd0vlqB6SSq8fP3p/ka2iWAkoWSF1H99yCyoaQvZJpuxdfhlp3ptX+IcU9ixW1e
-
-    /lFuOz9KaPBnmz3ZlJtXwZ9Jn5aYeOatPU+vLPDpcHBpVpuKpQ2gZc5nwqyS/ehM
-
-    qzycnLplJAlrXm0EWipjy90lNi0DNV57hpaKKH5Ti/UGQnGTAfpFXf/YMu4PVVFj
-
-    GHx9VyTAX7dM1yRHMIF4/3qFAnbig8erCeRAEeUhLjMs6T1KidgBltyr7dz4mYoX
-
-    3MdgwTL4rnBCFFopN8vwcK+7bkj/6D1g2a6RnExa6ZB4QJBf28iBfn+i7qoNvt+1
-
-    iwgo9DECgYEA3IpFCAOMYiHagRrv3jTT0qKgCYBdt1La2Sjj7hCZUYJBp1it5zjr
-
-    VbT2J4xH38fr8OflEwcdpJnOVPKQA3AgRrixF8wG5WxIaDxBruOYE6QoqWSwnNYJ
-
-    egcKIDHbZH0C9y+P45IGbHEUEKJIdtSHo6uJYEQ+JSJdlR/BuTQDCFkCgYEA2A9Z
-
-    oxu7gwc1y01Y0FskU0guLm1XyDQEUf7uyFOMu7Fou/Nb4mKLVH5WmM5MQSBt77ow
-
-    yBOZliT7ucSzQYkZ7xENAJ4QNzLAsQ9nBOgAjpnSCM6KZXdklntdl0yc5vy3CrkK
-
-    QxcRURJdvrCNh+yGON7gh2tN3hmHldz9mIe5noMCgYBZL5WBCyZPXQ9nHISaYNS0
-
-    ns2HaiD8DQaxwUaDtLS78KgqS8Nv+WW6MEdHcQRz6/5vWugZjMwhD44QblNtAIAc
-
-    +X2sePbk+qhBdvS9DA8pCj7jWfPOSQRmE8i1glQNzDxRyCsxRZFRc11A6M/TNllw
-
-    B+OzSYW0MLSVpNUL14vOaQKBgQCx8IJsOKzHjqqZEsxwXH+gLzQJfHftJBnixcg2
-
-    J8kh00xkblpgKbSCE8KR+mUGxgSEiJ0gSjQVWcQPDJZtBNLc9vf0aDkGnL/hksPP
-
-    YJsE1l4Kbr/ALQIkhNlhf/FAsyS0qgxvkJHsaOnX4GPPa9ZnA/d6z77DidcGb4hT
-
-    lIQkrQKBgQCMKuX59yLIOEHgiaBNnM1/nunSyinlTbEU7mJuYKPdScJ8qi/CouN8
-
-    UBeSH9OEKUDqxj1V8BeHKCPcHrLRwTsYONpR+6VQ2n0Y7UUHwz4ZY+ljAUHhD/4B
-
-    d6GOUmhxa7PCcr2g4RwsGyDwvwm7fyQb0cCPW0aLeiLeVsdUBd5gbg==
-
-    -----END RSA PRIVATE KEY-----'
-  octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6IeORZGSEhZ2GJE+p0T+tghtxiwyQNIhmuW7TLOJwYHHI04erDNbjzNCvnL+rgdpo8LkcHgsbKWcSfIzi9Igj6SJP0TxPWyv9c2L0apiic9UQNS5oTwSZnVGdWmlOqmRsalGcXoaYG0ZJ22Ezeli33v9R5q+IUnJ7cbe3PjuljBc0PiPqZoSVxKhQSe2keQnl6wHL4bfKvZj2YX0Q5Xpk3E3WMhza5/EVNCu6GrHHy6DURYX0mUQHJ8wvqDAercWBLPYwdZf62j4tEkAu/kpjhQDCisA/Np9jj/nntzqCCq+9qSTCqO7n3PlBaoFXxW77fRVN1tB3zGydiU90BTOL
-  offline_deployment: 'False'
-  opencontrail_compute_iface_mask: '24'
-  opencontrail_enabled: 'False'
-  openscap_enabled: 'False'
-  openssh_groups: ''
-  openstack_benchmark_node01_address: ==IPV4_NET_CONTROL_PREFIX==.95
-  openstack_benchmark_node01_hostname: bmk01
-  openstack_cluster_size: compact
-  openstack_compute_control_address_ranges: ==IPV4_NET_CONTROL_PREFIX==.101-==IPV4_NET_CONTROL_PREFIX==.200
-  openstack_compute_count: '100'
-  openstack_compute_rack01_hostname: cmp
-  openstack_compute_tenant_address_ranges: ==IPV4_NET_TENANT_PREFIX==.101-==IPV4_NET_TENANT_PREFIX==.200
-  openstack_control_address: ==IPV4_NET_CONTROL_PREFIX==.10
-  openstack_control_hostname: ctl
-  openstack_control_node01_address: ==IPV4_NET_CONTROL_PREFIX==.11
-  openstack_control_node01_hostname: ctl01
-  openstack_control_node02_address: ==IPV4_NET_CONTROL_PREFIX==.12
-  openstack_control_node02_hostname: ctl02
-  openstack_control_node03_address: ==IPV4_NET_CONTROL_PREFIX==.13
-  openstack_control_node03_hostname: ctl03
-  openstack_database_address: ==IPV4_NET_CONTROL_PREFIX==.10
-  openstack_database_hostname: ctl
-  openstack_database_node01_address: ==IPV4_NET_CONTROL_PREFIX==.11
-  openstack_database_node01_hostname: ctl01
-  openstack_database_node02_address: ==IPV4_NET_CONTROL_PREFIX==.12
-  openstack_database_node02_hostname: ctl02
-  openstack_database_node03_address: ==IPV4_NET_CONTROL_PREFIX==.13
-  openstack_database_node03_hostname: ctl03
-  openstack_enabled: 'True'
-  openstack_gateway_node01_address: ==IPV4_NET_CONTROL_PREFIX==.224
-  openstack_gateway_node01_hostname: gtw01
-  openstack_gateway_node01_tenant_address: ==IPV4_NET_TENANT_PREFIX==.6
-  openstack_gateway_node02_address: ==IPV4_NET_CONTROL_PREFIX==.225
-  openstack_gateway_node02_hostname: gtw02
-  openstack_gateway_node02_tenant_address: ==IPV4_NET_TENANT_PREFIX==.7
-  openstack_gateway_node03_address: ==IPV4_NET_CONTROL_PREFIX==.226
-  openstack_gateway_node03_hostname: gtw03
-  openstack_gateway_node03_tenant_address: ==IPV4_NET_TENANT_PREFIX==.8
-  openstack_internal_protocol: http
-  openstack_memcache_security_enabled: 'False'
-  openstack_message_queue_address: ==IPV4_NET_CONTROL_PREFIX==.10
-  openstack_message_queue_hostname: ctl
-  openstack_message_queue_node01_address: ==IPV4_NET_CONTROL_PREFIX==.11
-  openstack_message_queue_node01_hostname: ctl01
-  openstack_message_queue_node02_address: ==IPV4_NET_CONTROL_PREFIX==.12
-  openstack_message_queue_node02_hostname: ctl02
-  openstack_message_queue_node03_address: ==IPV4_NET_CONTROL_PREFIX==.13
-  openstack_message_queue_node03_hostname: ctl03
-  openstack_network_engine: ovs
-  openstack_neutron_bgp_vpn: 'False'
-  openstack_neutron_bgp_vpn_driver: bagpipe
-  openstack_neutron_qos: 'False'
-  openstack_neutron_vlan_aware_vms: 'False'
-  openstack_nfv_dpdk_enabled: 'False'
-  openstack_nfv_sriov_enabled: 'False'
-  openstack_nova_compute_nfv_req_enabled: 'False'
-  openstack_nova_compute_reserved_host_memory_mb: '900'
-  openstack_octavia_enabled: 'False'
-  openstack_ovs_dvr_enabled: 'False'
-  openstack_ovs_encapsulation_type: vxlan
-  openstack_proxy_address: ==IPV4_NET_CONTROL_PREFIX==.80
-  openstack_proxy_hostname: prx
-  openstack_proxy_node01_address: ==IPV4_NET_CONTROL_PREFIX==.81
-  openstack_proxy_node01_hostname: prx01
-  openstack_proxy_node02_address: ==IPV4_NET_CONTROL_PREFIX==.82
-  openstack_proxy_node02_hostname: prx02
-  openstack_upgrade_node01_address: ==IPV4_NET_CONTROL_PREFIX==.19
-  openstack_version: queens
-  platform: openstack_enabled
-  publication_method: email
-  rabbitmq_ssl_enabled: 'False'
-  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
-  salt_api_password: 8UWALgLbADCp18pAj8hCIzJE2ZWPXgK7E23n8W44ji0A
-  salt_api_password_hash: $6$FaHYNcaGIBJF$n4hvLCOhR0/IrbPVgWCfddYilsmXF8T1hj38VJ2auL5Y8DdY2TG2/wc6KNCivYe8uQ68L0keoDfcu1eio.WbS1
-  salt_master_address: ==IPV4_NET_CONTROL_PREFIX==.15
-  salt_master_hostname: cfg01
-  salt_master_management_address: ==IPV4_NET_ADMIN_PREFIX==.15
-  secrets_encryption_enabled: 'False'
-  secrets_encryption_private_key: '1'
-  sf_notifications_enabled: 'False'
-  shared_reclass_branch: ''
-  shared_reclass_url: https://github.com/Mirantis/reclass-system-salt-model.git
-  sriov_network_subnet: 10.55.0.0/16
-  stacklight_enabled: 'False'
-  static_ips_on_deploy_network_enabled: 'False'
-  tenant_network_gateway: ==IPV4_NET_TENANT_PREFIX==.1
-  tenant_network_netmask: 255.255.255.0
-  tenant_network_subnet: ==IPV4_NET_TENANT_PREFIX==.0/24
-  tenant_telemetry_enabled: 'False'
-  tenant_vlan: '20'
-  upstream_proxy_enabled: 'False'
-  use_default_network_scheme: 'True'
-  vnf_onboarding_enabled: 'False'
-  xtrabackup_client_throttle: '0'
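The ==IPV4_NET_*_PREFIX== markers throughout this context are substituted with the lab's allocated /24 prefixes before the model is rendered. A sketch of the substituted values, assuming the first /24 carved from the underlay defaults shown further below (private 10.60.0.0/16:24, admin 10.70.0.0/16:24); the actual prefixes depend on the pools the environment allocates:

    control_network_subnet: 10.60.0.0/24
    deploy_network_subnet: 10.70.0.0/24
    salt_master_address: 10.60.0.15   # matches cfg01 at +15 in the underlay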
diff --git a/tcp_tests/templates/cookied-small-mcp-ironic/environment-context.yaml b/tcp_tests/templates/cookied-small-mcp-ironic/environment-context.yaml
deleted file mode 100644
index ccc417d..0000000
--- a/tcp_tests/templates/cookied-small-mcp-ironic/environment-context.yaml
+++ /dev/null
@@ -1,154 +0,0 @@
-nodes:
-    cfg01:
-      reclass_storage_name: infra_config_node01
-      roles:
-      - infra_config
-      - linux_system_codename_xenial
-      - features_runtest
-      #classes:
-      #- environment.cookied-small-mcp-ironic.override_ntp_virtual
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_static_ctl
-
-    ctl01:
-      reclass_storage_name: openstack_control_node01
-      roles:
-      - openstack_control_leader
-      - openstack_database_leader
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    ctl02:
-      reclass_storage_name: openstack_control_node02
-      roles:
-      - openstack_control
-      - openstack_database
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    ctl03:
-      reclass_storage_name: openstack_control_node03
-      roles:
-      - openstack_control
-      - openstack_database
-      - openstack_message_queue
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    prx01:
-      reclass_storage_name: openstack_proxy_node01
-      roles:
-      - openstack_proxy  # a separate VIP interface is used
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: single_external
-          external_address: 172.17.16.121
-          external_network_netmask: 255.255.255.0
-
-    gtw01:
-      reclass_storage_name: openstack_gateway_node01
-      roles:
-      - linux_system_codename_xenial
-      classes:
-      - system.linux.system.repo.mcp.apt_mirantis.docker
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: bond0_ab_ovs_vxlan_mesh
-        ens6:
-          role: bond1_ab_ovs_floating
-        ens7:
-          role: single_ovs_br_baremetal
-
-    bmt01:
-      reclass_storage_name: openstack_baremetal_node01
-      roles:
-      - openstack_baremetal
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens7:
-          role: single_baremetal
-
-    kvm01.cookied-cicd-bm-os-contrail40-maas.local:
-      reclass_storage_name: infra_kvm_node01
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: bond0_ab_ovs_vlan_ctl
-        ens5:
-          role: single_mgm_manual
-
-    kvm02.cookied-cicd-bm-os-contrail40-maas.local:
-      reclass_storage_name: infra_kvm_node02
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: bond0_ab_ovs_vlan_ctl
-        ens5:
-          role: single_mgm_manual
-
-    kvm03.cookied-cicd-bm-os-contrail40-maas.local:
-      reclass_storage_name: infra_kvm_node03
-      roles:
-      - infra_kvm
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: bond0_ab_ovs_vlan_ctl
-        ens5:
-          role: single_mgm_manual
-
-    cmp<<count>>:
-      reclass_storage_name: openstack_compute_rack01
-      roles:
-      - openstack_compute
-      - features_lvm_backend_volume_vdb
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-        ens5:
-          role: bond0_ab_ovs_vxlan_mesh
-        ens6:
-          role: bond1_ab_ovs_floating
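The cmp<<count>> key above is a multiply-expanded node definition: the generator stamps out one entry per compute node, zero-padded because compute_padding_with_zeros is enabled in the cookiecutter context. An illustrative expansion of the first node (the padded name is assumed; roles are copied verbatim from the template):

    cmp001:
      reclass_storage_name: openstack_compute_rack01
      roles:
      - openstack_compute
      - features_lvm_backend_volume_vdb
      - linux_system_codename_xenial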
diff --git a/tcp_tests/templates/cookied-small-mcp-ironic/salt.yaml b/tcp_tests/templates/cookied-small-mcp-ironic/salt.yaml
deleted file mode 100644
index b82e26c..0000000
--- a/tcp_tests/templates/cookied-small-mcp-ironic/salt.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-{% from 'cookied-small-mcp-ironic/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-small-mcp-ironic/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-small-mcp-ironic/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-#- description: "Workaround for using glusterfs on single node"
-#  cmd: |
-#    set -e;
-#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/replica: .*//g' {} +
-#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/.*cluster_node02_address.*//g' {} +
-#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/.*cluster_node03_address.*//g' {} +
-#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/opts: .*/opts: \"defaults,backup-volfile-servers=${_param:glusterfs_node01_address}\"/g' {} +
-#
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 5}
-#  skip_fail: false
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
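The two version-check macros assert that the Salt master and all minions run the expected package versions; a rough manual equivalent (illustrative, the exact checks live in shared-salt.yaml):

    salt --version
    salt '*' test.version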
diff --git a/tcp_tests/templates/cookied-small-mcp-ironic/underlay--meta-data.yaml b/tcp_tests/templates/cookied-small-mcp-ironic/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-small-mcp-ironic/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
-  instance-id: iid-local1
-  hostname: {hostname}
-  local-hostname: {hostname}
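The {hostname} fields are plain format placeholders filled per node when the config drive is generated. Rendered for cfg01 with the default domain, the meta-data would read (illustrative):

    instance-id: iid-local1
    hostname: cfg01.cookied-small-mcp-ironic.local
    local-hostname: cfg01.cookied-small-mcp-ironic.local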
diff --git a/tcp_tests/templates/cookied-small-mcp-ironic/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-small-mcp-ironic/underlay--user-data1604-swp.yaml
deleted file mode 100644
index 81936a4..0000000
--- a/tcp_tests/templates/cookied-small-mcp-ironic/underlay--user-data1604-swp.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   - if lvs vg0; then pvresize /dev/vda3; fi
-   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
-
-   - export TERM=linux
-   - export LANG=C
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 16G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /usr/share/growlvm/image-layout.yml
-     content: |
-       root:
-         size: '30%VG'
-       home:
-         size: '1G'
-       var_log:
-         size: '11%VG'
-       var_log_audit:
-         size: '5G'
-       var_tmp:
-         size: '11%VG'
-       tmp:
-         size: '5G'
-     owner: root:root
-
-  growpart:
-    mode: auto
-    devices:
-      - '/'
-      - '/dev/vda3'
-    ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-small-mcp-ironic/underlay.yaml b/tcp_tests/templates/cookied-small-mcp-ironic/underlay.yaml
deleted file mode 100644
index 4eb1506..0000000
--- a/tcp_tests/templates/cookied-small-mcp-ironic/underlay.yaml
+++ /dev/null
@@ -1,397 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-small-mcp-ironic/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-small-mcp-ironic/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-small-mcp-ironic') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_BMT01 = os_env('HOSTNAME_BMT01', 'bmt01.' + DOMAIN_NAME) %}
-
-template:
-  devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'cookied-small-mcp-ironic_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
-    address_pools:
-      private-pool01:
-        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_BMT01 }}: +76
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_BMT01 }}: +76
-          ip_ranges:
-            dhcp: [+90, -10]
-
-      tenant-pool01:
-        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+10, -10]
-
-      external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_CFG01 }}: +15
-            default_{{ HOSTNAME_CTL01 }}: +101
-            default_{{ HOSTNAME_CTL02 }}: +102
-            default_{{ HOSTNAME_CTL03 }}: +103
-            default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_PRX01 }}: +121
-          ip_ranges:
-            dhcp: [+180, +220]
-
-    groups:
-      - name: default
-        driver:
-          name: devops.driver.libvirt
-          params:
-            connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
-            stp: False
-            hpet: False
-            enable_acpi: true
-            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
-        network_pools:
-          admin: admin-pool01
-          private: private-pool01
-          tenant: tenant-pool01
-          external: external-pool01
-
-        l2_network_devices:
-          private:
-            address_pool: private-pool01
-            dhcp: false
-            forward:
-              mode: route
-
-          admin:
-            address_pool: admin-pool01
-            dhcp: true
-            forward:
-              mode: nat
-
-          tenant:
-            address_pool: tenant-pool01
-            dhcp: false
-
-          external:
-            address_pool: external-pool01
-            dhcp: false
-            forward:
-              mode: route
-
-          baremetal:
-            parent_iface:
-              phys_dev: {{ os_env('IFACE_IRONIC', 'veth-contdpdk1') }}
-
-
-        group_volumes:
-         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
-           source_image: {{ os_env('MCP_IMAGE_PATH1604') }}  # http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2
-           format: qcow2
-         - name: cfg01_day01_image               # Pre-configured day01 image
-           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
-           format: qcow2
-         - name: mcp_ubuntu_1604_image           # Pre-configured image for control plane
-           source_image: !os_env MCP_IMAGE_PATH1604
-           format: qcow2
-
-        nodes:
-          - name: {{ HOSTNAME_CFG01 }}
-            role: salt_master
-            params:
-              vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
-              memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
-              boot:
-                - hd
-              volumes:
-                - name: system
-                  capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
-                  backing_store: cfg01_day01_image
-                  format: qcow2
-                - name: config
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
-                                                            # it will be uploaded after config drive generation
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 16384
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # The volume named 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: &interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-              network_config: &network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-
-          - name: {{ HOSTNAME_CTL02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 16384
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # The volume named 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_CTL03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 16384
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # The volume named 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_PRX01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces:
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: external
-                  interface_model: *interface_model
-              network_config:
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - external
-
-          - name: {{ HOSTNAME_GTW01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: &all_gtw_interfaces
-                - label: ens3
-                  l2_network_device: admin
-                  interface_model: *interface_model
-                - label: ens4
-                  l2_network_device: private
-                  interface_model: *interface_model
-                - label: ens5
-                  l2_network_device: tenant
-                  interface_model: *interface_model
-                - label: ens6
-                  l2_network_device: external
-                  interface_model: *interface_model
-                - label: ens7
-                  l2_network_device: baremetal
-                  interface_model: *interface_model
-              network_config: &all_gtw_network_config
-                ens3:
-                  networks:
-                    - admin
-                ens4:
-                  networks:
-                    - private
-                ens5:
-                  networks:
-                    - tenant
-                ens6:
-                  networks:
-                    - external
-                ens7:
-                  networks:
-                    - baremetal
-
-          - name: {{ HOSTNAME_BMT01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604_swp
-
-              interfaces: *all_gtw_interfaces
-              network_config: *all_gtw_network_config