Fix underlay and deployment steps for cookied-bm-mcp-pike-k8s-contrail

Change-Id: Ie6e65afca93d2f8cd0061aa97be5d61b7259f96d
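
Every step edited below follows the same tcp-tests step schema: a shell
command executed from a named node, with retry and failure-tolerance
knobs. A minimal sketch of that shape (semantics inferred from the
templates themselves; the selector and state are illustrative):

    - description: <what the step does>
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@some:pillar' state.sls some.state
      node_name: {{ HOSTNAME_CFG01 }}  # node that runs the command
      retry: {count: 2, delay: 30}     # up to 2 attempts, 30 s apart
      skip_fail: false                 # a persistent failure aborts the run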
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml
index d559d73..308051a 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml
@@ -1,4 +1,17 @@
 {% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CTL01 with context %}
+
+{%- macro MACRO_CHECK_SYSTEMCTL() %}
+{#######################################}
+- description: Check systemctl on computes
+  cmd: |
+    set -ex;
+    salt 'cmp*' cmd.run "systemctl --version";
+    salt 'cmp*' cmd.run "journalctl -u dbus";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: true
+{%- endmacro %}
 
 - description:  Install keepalived on primary controller
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -25,21 +38,86 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'I@etcd:server' state.sls etcd.server.service
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 3, delay: 30}
   skip_fail: false
 
 - description: Install certs
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@etcd:server' salt.minion -b 1
+    -C 'I@etcd:server' state.sls salt.minion -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
 
-- description: Install etcd
+# Install opencontrail database services
+- description: Install opencontrail database services for 01
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@etcd:server' state.sls etcd.server.service
+    -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Install opencontrail database services on all nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database' state.sls opencontrail.database
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+# Install opencontrail control services
+- description: Install opencontrail services for 01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Install opencontrail services on all control nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Install certificates on Kubernetes master nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@kubernetes:master' state.sls salt.minion.cert
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Install docker host
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@docker:host' state.sls docker.host
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+#- description: Configure OpenContrail as an add-on for Kubernetes
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#    -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 5}
+#  skip_fail: false
+
+- description: Install Kubernetes components
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@kubernetes:master' state.sls kubernetes.pool
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: "Run k8s master at *01* to simplify namespaces creation"
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@kubernetes:master and *01*' state.sls kubernetes.master exclude=kubernetes.master.setup,kubernetes.master.kube-addons
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
+- description: Run kubernetes states without master.setup and kube-addons
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup,kubernetes.master.kube-addons
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
   skip_fail: false
 
 - description: Check the etcd health
@@ -49,51 +127,88 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Install Kubernetes Addons
+{{ MACRO_CHECK_SYSTEMCTL() }}
+
+- description: Run Kubernetes master setup
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
+     -C 'I@kubernetes:master and *01*' state.sls kubernetes.master.setup
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Install Kubernetes components
+- description: Restart Kubelet
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:pool' state.sls kubernetes.pool
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 60}
-  skip_fail: false
-
-# Opencontrail Control Plane
-
-- description: Create configuration files for OpenContrail
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+    -C 'I@kubernetes:master' service.restart 'kubelet'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Configure OpenContrail as an add-on for Kubernetes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
-  node_name: {{ HOSTNAME_CFG01 }}
+- description: Wait for contrail containers to come up; the opencontrail.client state should be run only after that
+  cmd: |
+    sleep 30;
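+    # Poll every 60 s (up to 10 tries) until the ready count matches the total for the opencontrail pods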
+    total_pods=`kubectl get pods --all-namespaces | awk '/opencontrail/ {print $3}' | cut -d "/" -f2`
+    for i in `seq 1 10`; do
+      ready_pods=`kubectl get pods --all-namespaces | awk '/opencontrail/ {print $3}' | cut -d "/" -f1`
+      if [ "$ready_pods" == "$total_pods" ];then
+        echo "containers are ready. Going to the next step"
+        break
+      elif [ "$i" -ne "10" ]; then
+        echo "Opencontrail containers is not ready. $ready_pods from $total_pods is ready."
+        sleep 60
+        continue
+      else
+        echo "Failed to up contrail containers in 10 minutes"
+        exit 1
+      fi
+    done
+  node_name: {{ HOSTNAME_CTL01 }}
   retry: {count: 1, delay: 5}
   skip_fail: true
 
-- description: Verify the status of the OpenContrail service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
+- description: Check all pods
+  cmd: |
+     salt 'ctl*' cmd.run "kubectl get pods -o wide --all-namespaces";
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
+# Install contrail computes
 - description: Set up the OpenContrail resources
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
   node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 60}
+  skip_fail: false
+
+- description: Apply opencontrail (excluding opencontrail.client) on contrail computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Reboot contrail computes
+  cmd: |
+    salt --async -C 'I@opencontrail:compute' system.reboot;
+    sleep 450;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
+
+- description: Apply opencontrail.client on contrail computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' state.sls opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Apply the full opencontrail state on contrail computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' state.sls opencontrail
+  node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-# OpenContrail vrouters
 - description: Refresh pillars on cmp*
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'cmp*' saltutil.refresh_pillar
@@ -108,56 +223,56 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Apply highstate on contrail computes
+- description: Install certificates on Kubernetes master nodes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:compute' state.highstate exclude=opencontrail.client
+    -C 'I@kubernetes:master' state.sls salt.minion.cert
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 2, delay: 30}
   skip_fail: false
 
-- description: Reboot contrail computes
-  cmd: salt --timeout=600 -C 'I@opencontrail:compute' system.reboot
+- description: Install Kubernetes components
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@kubernetes:pool and not I@kubernetes:master' state.sls kubernetes.pool
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
-  skip_fail: true
-
-- description: Apply opencontrail.client on contrail computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:compute' state.sls opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Run Kubernetes master without setup
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: true
-
-- description: Run Kubernetes master setup
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@kubernetes:master' --subset 1 state.sls kubernetes.master.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
 - description: Restart Kubelet
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:pool' service.restart 'kubelet'
+    -C 'I@kubernetes:pool and not I@kubernetes:master' service.restart 'kubelet'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
-- description: Check nodes registrations
+- description: Configure OpenContrail as an add-on for Kubernetes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:pool' cmd.run 'sleep 60; kubectl get nodes'
+    -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Renew hosts file on a whole cluster
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '*' state.sls linux.network.host;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
+
+- description: Final check of all pods
+  cmd: |
+     sleep 60;
+     salt 'ctl*' cmd.run "kubectl get pods -o wide --all-namespaces";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check contrail status on all pods
+  cmd: |
+     pods=`kubectl get pods --all-namespaces | awk '/opencontrail/ {print $2}'`
+     for i in $pods; do
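+       # Check contrail-status in each of the three opencontrail containers of the pod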
+       kubectl exec $i -c opencontrail-controller -n kube-system contrail-status;
+       kubectl exec $i -c opencontrail-analytics -n kube-system contrail-status;
+       kubectl exec $i -c opencontrail-analyticsdb -n kube-system contrail-status;
+     done
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
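
The inline pod-readiness wait added above could later be factored into a
Jinja macro next to MACRO_CHECK_SYSTEMCTL. A hypothetical sketch (macro
name and timings are illustrative, not part of this change):

    {%- macro MACRO_WAIT_CONTRAIL_PODS() %}
    - description: Wait until all opencontrail pods report ready
      cmd: |
        for i in `seq 1 10`; do
          # READY column is "ready/total" per pod, e.g. "2/3" -> 2 and 3
          ready=`kubectl get pods --all-namespaces | awk '/opencontrail/ {print $3}' | cut -d "/" -f1`
          total=`kubectl get pods --all-namespaces | awk '/opencontrail/ {print $3}' | cut -d "/" -f2`
          if [ "$ready" == "$total" ]; then break; fi
          if [ "$i" -eq 10 ]; then echo "contrail pods not ready in time"; exit 1; fi
          sleep 60
        done
      node_name: {{ HOSTNAME_CTL01 }}
      retry: {count: 1, delay: 5}
      skip_fail: true
    {%- endmacro %}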
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml
index c5648a8..9cf1366 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml
@@ -18,7 +18,7 @@
         enp9s0f0:
           role: single_mgm
         enp9s0f1:
-          role: single_ctl
+          role: single_vlan_ctl
 
     kvm02.bm-mcp-pike-k8s-contrail.local:
       reclass_storage_name: infra_kvm_node02
@@ -29,7 +29,7 @@
         enp9s0f0:
           role: single_mgm
         enp9s0f1:
-          role: single_ctl
+          role: single_vlan_ctl
 
     kvm03.bm-mcp-pike-k8s-contrail.local:
       reclass_storage_name: infra_kvm_node03
@@ -40,7 +40,7 @@
         enp9s0f0:
           role: single_mgm
         enp9s0f1:
-          role: single_ctl
+          role: single_vlan_ctl
 
     ctl01.bm-mcp-pike-k8s-contrail.local:
       reclass_storage_name: kubernetes_control_node01
@@ -52,7 +52,7 @@
           role: single_mgm
           deploy_address: 172.17.41.9
         enp2s0f1:
-          role: single_ctl
+          role: single_vlan_ctl
           single_address: 10.167.8.239
 
     ctl02.bm-mcp-pike-k8s-contrail.local:
@@ -65,7 +65,7 @@
           role: single_mgm
           deploy_address: 172.17.41.10
         enp2s0f1:
-          role: single_ctl
+          role: single_vlan_ctl
           single_address: 10.167.8.238
 
     ctl03.bm-mcp-pike-k8s-contrail.local:
@@ -78,41 +78,21 @@
           role: single_mgm
           deploy_address: 172.17.41.11
         enp2s0f1:
-          role: single_ctl
+          role: single_vlan_ctl
           single_address: 10.167.8.237
 
-    # prx01.bm-mcp-pike-k8s-contrail.local:
-    #   reclass_storage_name: kubernetes_proxy_node01
-    #   roles:
-    #   - kubernetes_proxy
-    #   # - infra_proxy
-    #   # - stacklight_proxy
-    #   - salt_master_host
-    #   - linux_system_codename_xenial
-    #   interfaces:
-    #     enp9s0f0:
-    #       role: single_mgm
-    #       deploy_address: 172.17.41.8
-    #     enp9s0f1:
-    #       role: single_ctl
-    #       single_address: 10.167.8.81
-
     cmp001.bm-mcp-pike-k8s-contrail.local:
       reclass_storage_name: kubernetes_compute_node001
       roles:
       - linux_system_codename_xenial
       - kubernetes_compute_contrail
       - salt_master_host
-        #- features_lvm_backend
       interfaces:
         enp9s0f0:
           role: single_dhcp
-        ens11f0:
-          role: bond0_ab_contrail
-          tenant_address: 192.168.0.101
         ens11f1:
-          role: single_ctl
-          single_address: 10.167.8.101
+          role: k8s_oc40_only_vhost_on_control_vlan
+          single_address: 10.167.8.103
 
     cmp002.bm-mcp-pike-k8s-contrail.local:
       reclass_storage_name: kubernetes_compute_node002
@@ -120,30 +100,9 @@
       - linux_system_codename_xenial
       - kubernetes_compute_contrail
       - salt_master_host
-        #- features_lvm_backend
       interfaces:
         enp9s0f0:
           role: single_dhcp
-        ens11f0:
-          role: bond0_ab_contrail
-          tenant_address: 192.168.0.102
         ens11f1:
-          role: single_ctl
-          single_address: 10.167.8.102
-
-    # cmp002.bm-mcp-pike-k8s-contrail.local:
-    #   reclass_storage_name: kubernetes_compute_node02
-    #   roles:
-    #   - features_lvm_backend
-    #   - linux_system_codename_xenial
-    #   - kubernetes_compute_contrail
-    #   interfaces:
-    #     enp2s0f1:
-    #       role: single_mgm
-    #       deploy_address: 172.16.49.74
-    #     enp5s0f0:
-    #       role: bond0_ab_contrail
-    #       tenant_address: 192.168.0.102
-    #     enp5s0f1:
-    #       role: single_vlan_ctl
-    #       single_address: 10.167.8.102
+          role: k8s_oc40_only_vhost_on_control_vlan
+          single_address: 10.167.8.104
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
index 88aef93..4c4f1db 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
@@ -32,6 +32,8 @@
   bmk_enabled: 'False'
   ceph_enabled: 'False'
   auditd_enabled: 'False'
+  kubernetes_coredns_enabled: False
+  kubernetes_kubedns_enabled: True
   cicd_control_node01_address: 10.167.8.91
   cicd_control_node01_hostname: cid01
   cicd_control_node02_address: 10.167.8.92
@@ -85,7 +87,7 @@
   cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.17.41.2
   deploy_network_netmask: 255.255.255.192
-  deploy_network_subnet: 172.16.49.64/26
+  deploy_network_subnet: 172.17.41.0/26
   deployment_type: physical
   dns_server01: 172.17.41.2
   dns_server02: 172.17.41.2
@@ -108,7 +110,7 @@
   kubernetes_enabled: 'True'
   kubernetes_compute_count: 2
   kubernetes_compute_rack01_single_subnet: 10.167.8
-  kubernetes_compute_rack01_tenant_subnet: 192.168.0
+  kubernetes_compute_rack01_tenant_subnet: 10.167.8
   kubernetes_network_opencontrail_enabled: 'True'
   local_repositories: 'False'
   maas_deploy_address: 172.16.49.66
@@ -131,15 +133,16 @@
   kubernetes_control_node03_address: 10.167.8.237
   kubernetes_control_node03_hostname: ctl03
   linux_repo_contrail_component: oc40
-  opencontrail_analytics_address: 10.167.8.30
   opencontrail_analytics_hostname: ctl
-  opencontrail_analytics_node01_address: 10.167.8.31
   opencontrail_analytics_node01_hostname: ctl01
-  opencontrail_analytics_node02_address: 10.167.8.32
   opencontrail_analytics_node02_hostname: ctl02
-  opencontrail_analytics_node03_address: 10.167.8.33
   opencontrail_analytics_node03_hostname: ctl03
+  opencontrail_analytics_address: ${_param:opencontrail_control_address}
+  opencontrail_analytics_node01_address: ${_param:opencontrail_control_node01_address}
+  opencontrail_analytics_node02_address: ${_param:opencontrail_control_node02_address}
+  opencontrail_analytics_node03_address: ${_param:opencontrail_control_node03_address}
   opencontrail_compute_iface_mask: '24'
+  opencontrail_compute_iface: ens11f1
   opencontrail_control_address: 10.167.8.236
   opencontrail_control_hostname: ctl
   opencontrail_control_node01_address: 10.167.8.239
@@ -153,6 +156,7 @@
   opencontrail_router01_hostname: rtr01
   opencontrail_router02_address: 10.167.8.101
   opencontrail_router02_hostname: rtr02
+  opencontrail_public_ip_range: 172.17.41.128/26
   opencontrail_version: '4.0'
   openstack_enabled: 'False'
   openssh_groups: ''
@@ -204,10 +208,10 @@
   stacklight_telemetry_node03_hostname: mtr03
   stacklight_version: '2'
   static_ips_on_deploy_network_enabled: 'False'
-  tenant_network_gateway: 192.168.0.1
+  tenant_network_gateway: 10.167.8.1
   tenant_network_netmask: 255.255.255.0
-  tenant_network_subnet: 192.168.0.0/24
-  tenant_vlan: '2411'
+  tenant_network_subnet: 10.167.8.0/24
+  tenant_vlan: '2410'
   upstream_proxy_enabled: 'False'
   use_default_network_scheme: 'True'
   vnf_onboarding_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml
index 18032a1..47e12c8 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml
@@ -1,58 +1,4 @@
 nodes:
-    # Virtual Control Plane nodes
-# commented as ctl is bm
-#    ctl01.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: kubernetes_control_node01
-#      roles:
-#      - kubernetes_control_contrail
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#
-#    ctl02.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: kubernetes_control_node02
-#      roles:
-#      - kubernetes_control_contrail
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#
-#    ctl03.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: kubernetes_control_node03
-#      roles:
-#      - kubernetes_control_contrail
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-
-#    commented as there is no k8s proxy nodes in this setup
-#    prx01.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: kubernetes_proxy_node01
-#      roles:
-#      - kubernetes_proxy
-#      # - infra_proxy
-#      # - stacklight_proxy
-#      - salt_master_host
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-
-#    prx02.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: kubernetes_proxy_node02
-#      roles:
-#      - kubernetes_proxy
-#      # - infra_proxy
-#      # - stacklight_proxy
-#      - salt_master_host
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-
     mon01.cookied-bm-mcp-ocata-contrail.local:
       reclass_storage_name: stacklight_server_node01
       roles:
@@ -80,73 +26,6 @@
         ens3:
           role: single_ctl
 
-#   commented as shpuld be in pod
-#   nal01.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: opencontrail_analytics_node01
-#      roles:
-#      - opencontrail_analytics
-#      - linux_system_codename_xenial
-#      - salt_master_host
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#          single_address: 10.167.8.31
-#
-#    nal02.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: opencontrail_analytics_node02
-#      roles:
-#      - opencontrail_analytics
-#      - linux_system_codename_xenial
-#      - salt_master_host
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#          single_address: 10.167.8.32
-#
-#    nal03.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: opencontrail_analytics_node03
-#      roles:
-#      - opencontrail_analytics
-#      - linux_system_codename_xenial
-#      - salt_master_host
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#          single_address: 10.167.8.33
-#
-#    ntw01.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: opencontrail_control_node01
-#      roles:
-#      - opencontrail_control
-#      - linux_system_codename_xenial
-#      - salt_master_host
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#          single_address: 10.167.8.21
-#
-#    ntw02.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: opencontrail_control_node02
-#      roles:
-#      - opencontrail_control
-#      - linux_system_codename_xenial
-#      - salt_master_host
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#          single_address: 10.167.8.22
-#
-#    ntw03.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: opencontrail_control_node03
-#      roles:
-#      - opencontrail_control
-#      - linux_system_codename_xenial
-#      - salt_master_host
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#          single_address: 10.167.8.23
-
     mtr01.cookied-bm-mcp-ocata-contrail.local:
       reclass_storage_name: stacklight_telemetry_node01
       roles:
@@ -201,11 +80,29 @@
         ens3:
           role: single_ctl
 
-#    bmk01.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: openstack_benchmark_node01
-#      roles:
-#      - openstack_benchmark
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
+    cid01.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: cicd_control_node01
+      roles:
+      - cicd_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    cid02.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: cicd_control_node02
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    cid03.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: cicd_control_node03
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
index 951075e..0d6bd52 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
@@ -21,7 +21,7 @@
 
 {{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
 
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN, CLUSTER_PRODUCT_MODELS='cicd infra kubernetes opencontrail stacklight2') }}
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
 
@@ -50,6 +50,13 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: Remove kubernetes proxy cluster inclusion from kvm.yml
+  cmd: |
+    sed -i 's/- system.salt.control.cluster.kubernetes_proxy_cluster//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
 - description: Temporary WR for correct bridge name according to envoronment templates
   cmd: |
     sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
@@ -58,6 +65,24 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: "Excluding tenant network from cluster"
+  cmd: |
+    set -e;
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools add-key parameters._param.opencontrail_compute_address '${_param:single_address}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/opencontrail/compute.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "Use correct compute interface"
+  cmd: |
+    set -e;
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools add-key parameters._param.opencontrail_compute_iface 'ens11f1.${_param:control_vlan}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/opencontrail/init.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
 - description: Rerun openssh after env model is generated
   cmd: |
     salt-call state.sls openssh
@@ -80,7 +105,7 @@
 
 - description: Update minion information
   cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
     salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
     salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
   node_name: {{ HOSTNAME_CFG01 }}
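
The reclass-tools overrides added above only take effect once pillar data
is refreshed (the sync_all/mine.update/refresh_pillar step right before
this note). An illustrative way to verify the rendered params on the
computes, not part of this change:

    - description: Verify opencontrail compute params in pillar
      cmd: |
        salt -C 'I@opencontrail:compute' pillar.get _param:opencontrail_compute_iface;
        salt -C 'I@opencontrail:compute' pillar.get _param:opencontrail_compute_address;
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: true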
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
index 0b559a8..9dcb4f6 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
@@ -1,4 +1,5 @@
 {% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
 
 # Install docker swarm
 - description: Configure docker service
@@ -65,36 +66,31 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+- description: Install glusterfs client on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:client' state.sls glusterfs.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
 # Install slv2 infra
 #Launch containers
 - description: Install Mongo if target matches
   cmd: |
     if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Configure Alerta if it is exists
+- description: Configure Mongo cluster if target matches
   cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
     fi
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: launch prometheus containers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Check docker ps
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
+  retry: {count: 5, delay: 20}
   skip_fail: false
 
 - description: Install telegraf
@@ -113,19 +109,31 @@
   skip_fail: false
 
 - description: Install elasticsearch server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true and *01*' state.sls elasticsearch.server
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install elasticsearch server on all nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true' state.sls elasticsearch.server
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true and *01*' state.sls kibana.server
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install kibana server on all nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true' state.sls kibana.server
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 30}
   skip_fail: false
@@ -141,23 +149,12 @@
     INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
     echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
     if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server and *01*' state.sls influxdb;
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb;
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
-
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
-  cmd: |
-    PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
-    if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 # Install service for the log collection
 - description: Configure fluentd
@@ -186,72 +183,85 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-# Collect grains needed to configure the services
+# Install and check Galera
 
-- description: Get grains
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+- description: Install Galera on first server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:master' state.sls galera
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on other servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:slave' state.sls galera -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check Galera on first server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:master' mysql.status
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Check Galera on other servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:slave' mysql.status
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Collect grains, refresh modules and update mine
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Sync modules
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Update mine
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 15}
-  skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
+- description: Configure prometheus in docker swarm
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Configure Remote Collector in Docker Swarm for Openstack deployments
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install sphinx
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-
-#- description: run docker state
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-#
-#- description: docker ps
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
-  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
-  node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
-- description: Run salt minion to create cert files
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+- description: Launch prometheus containers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Check docker ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 60}
+  skip_fail: false
+
+- description: Configure Alerta if it exists
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Run salt minion to create cert files
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion.cert
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+{{  SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{  SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
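
With the prometheus launch now ordered after grains collection, a
swarm-level check can confirm the stacklight services actually started.
An illustrative step, not part of this change:

    - description: Check docker swarm services
      cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker service ls'
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 2, delay: 10}
      skip_fail: true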
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
index da6afea..7832675 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
@@ -179,96 +179,6 @@
 
         nodes:
 
-        #  - name: {{ HOSTNAME_CFG01 }}
-        #    role: salt_master
-        #    params:
-        #      ipmi_user: !os_env IPMI_USER
-        #      ipmi_password: !os_env IPMI_PASSWORD
-        #      ipmi_previlegies: OPERATOR
-        #      ipmi_host: !os_env IPMI_HOST_CFG01  # hostname or IP address
-        #      ipmi_lan_interface: lanplus
-        #      ipmi_port: 623
-
-        #      root_volume_name: system     # see 'volumes' below
-        #      cloud_init_volume_name: iso  # see 'volumes' below
-        #      cloud_init_iface_up: enp3s0f1  # see 'interfaces' below.
-        #      volumes:
-        #        - name: system
-        #          capacity: !os_env NODE_VOLUME_SIZE, 200
-
-        #          # The same as for agent URL, here is an URL to the image that should be
-        #          # used for deploy the node. It should also be accessible from deploying
-        #          # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
-        #          source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-        #          source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-        #        - name: iso  # Volume with name 'iso' will be used
-        #                     # for store image with cloud-init metadata.
-
-        #          cloudinit_meta_data: *cloudinit_meta_data
-        #          cloudinit_user_data: *cloudinit_user_data_cfg01
-
-        #      interfaces:
-        #        - label: enp3s0f0  # Infra interface
-        #          mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
-        #        - label: enp3s0f1
-        #          l2_network_device: admin
-        #          mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
-
-        #      network_config:
-        #        enp3s0f0:
-        #          networks:
-        #           - infra
-        #        enp3s0f1:
-        #          networks:
-        #           - admin
-          # - name: {{ HOSTNAME_PRX01 }}
-          #   role: salt_minion
-          #   params:
-          #     ipmi_user: !os_env IPMI_USER
-          #     ipmi_password: !os_env IPMI_PASSWORD
-          #     ipmi_previlegies: OPERATOR
-          #     ipmi_host: !os_env IPMI_HOST_PRX01  # hostname or IP address
-          #     ipmi_lan_interface: lanplus
-          #     ipmi_port: 623
-
-          #     root_volume_name: system     # see 'volumes' below
-          #     cloud_init_volume_name: iso  # see 'volumes' below
-          #     cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
-          #     volumes:
-          #       - name: system
-          #         capacity: !os_env NODE_VOLUME_SIZE, 200
-
-          #         # The same as for agent URL, here is an URL to the image that should be
-          #         # used for deploy the node. It should also be accessible from deploying
-          #         # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
-          #         source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-          #         source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-          #       - name: iso  # Volume with name 'iso' will be used
-          #                    # for store image with cloud-init metadata.
-
-          #         cloudinit_meta_data: *cloudinit_meta_data
-          #         cloudinit_user_data: *cloudinit_user_data
-
-          #     interfaces:
-          #       - label: enp9s0f0
-          #         l2_network_device: admin
-          #         mac_address: !os_env ETH0_MAC_ADDRESS_PRX01
-          #       - label: enp9s0f1
-          #         mac_address: !os_env ETH1_MAC_ADDRESS_PRX01
-
-          #     network_config:
-          #       enp9s0f0:
-          #         networks:
-          #          - admin
-          #       bond0:
-          #         networks:
-          #          - control
-          #         aggregation: active-backup
-          #         parents:
-          #          - enp9s0f1
-
           - name: {{ HOSTNAME_KVM01 }}
             role: salt_minion
             params:
@@ -415,7 +325,7 @@
                    - enp9s0f1
 
           - name: {{ HOSTNAME_CTL01 }}
-            role: salt_minion
+            role: k8s_controller
             params:
               ipmi_user: !os_env IPMI_USER
               ipmi_password: !os_env IPMI_PASSWORD
@@ -591,12 +501,6 @@
                 enp9s0f0:
                   networks:
                    - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp9s0f1
 
           - name: {{ HOSTNAME_CMP002 }}
             role: salt_minion
@@ -638,9 +542,3 @@
                 enp9s0f0:
                   networks:
                    - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp9s0f1