Merge ""Remove creating networks from openstack.yaml Q3""
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/master_config.sh b/tcp_tests/templates/cookied-bm-contrail40-nfv/master_config.sh
deleted file mode 100644
index 0fc5723..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/master_config.sh
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/bin/bash -xe
-
-export SALT_MASTER_DEPLOY_IP=${SALT_MASTER_DEPLOY_IP:-"172.16.49.66"}
-export SALT_MASTER_MINION_ID=${SALT_MASTER_MINION_ID:-"cfg01.cookied-bm-contrail40-nfv.local"}
-export DEPLOY_NETWORK_GW=${DEPLOY_NETWORK_GW:-"172.16.49.65"}
-export DEPLOY_NETWORK_NETMASK=${DEPLOY_NETWORK_NETMASK:-"255.255.255.192"}
-export DNS_SERVERS=${DNS_SERVERS:-"172.18.208.44"}
-export http_proxy=${http_proxy:-""}
-export https_proxy=${https_proxy:-""}
-export PIPELINES_FROM_ISO=${PIPELINES_FROM_ISO:-"false"}
-export PIPELINE_REPO_URL=${PIPELINE_REPO_URL:-"https://github.com/Mirantis"}
-export MCP_VERSION=${MCP_VERSION:-"proposed"}
-export MCP_SALT_REPO_KEY=${MCP_SALT_REPO_KEY:-"http://apt.mirantis.com/public.gpg"}
-export MCP_SALT_REPO_URL=${MCP_SALT_REPO_URL:-"http://apt.mirantis.com/xenial"}
-export MCP_SALT_REPO="deb [arch=amd64] $MCP_SALT_REPO_URL $MCP_VERSION salt"
-export FORMULAS="salt-formula-*"
-# Not avaible in 2018.4 and pre.
-export LOCAL_REPOS=false
-#for cloning from aptly image use port 8088
-#export PIPELINE_REPO_URL=http://172.16.47.182:8088
-
-function _apt_cfg(){
-  # TODO remove those function after 2018.4 release
-  echo "Acquire::CompressionTypes::Order gz;" >/etc/apt/apt.conf.d/99compression-workaround-salt
-  echo "Acquire::EnableSrvRecords false;" >/etc/apt/apt.conf.d/99enablesrvrecords-false
-  echo "Acquire::http::Pipeline-Depth 0;" > /etc/apt/apt.conf.d/99aws-s3-mirrors-workaround-salt
-  echo "APT::Install-Recommends false;" > /etc/apt/apt.conf.d/99dont_install_recommends-salt
-  echo "APT::Install-Suggests false;" > /etc/apt/apt.conf.d/99dont_install_suggests-salt
-  echo "Acquire::Languages none;" > /etc/apt/apt.conf.d/99dont_acquire_all_languages-salt
-  echo "APT::Periodic::Update-Package-Lists 0;" > /etc/apt/apt.conf.d/99dont_update_package_list-salt
-  echo "APT::Periodic::Download-Upgradeable-Packages 0;" > /etc/apt/apt.conf.d/99dont_update_download_upg_packages-salt
-  echo "APT::Periodic::Unattended-Upgrade 0;" > /etc/apt/apt.conf.d/99disable_unattended_upgrade-salt
-  echo "INFO: cleaning sources lists"
-  rm -rv /etc/apt/sources.list.d/* || true
-  echo > /etc/apt/sources.list  || true
-}
-
-function _post_maas_cfg(){
-  local PROFILE=mirantis
-  # TODO: remove those check, and use only new version, adfter 2018.4 release
-  if [[ -f /var/lib/maas/.maas_login.sh ]]; then
-    /var/lib/maas/.maas_login.sh
-  else
-    echo "WARNING: Attempt to use old maas login schema.."
-    TOKEN=$(cat /var/lib/maas/.maas_credentials);
-    maas list | cut -d' ' -f1 | xargs -I{} maas logout {}
-    maas login $PROFILE http://127.0.0.1:5240/MAAS/api/2.0/ "${TOKEN}"
-  fi
-  # disable backports for maas enlist pkg repo
-  maas ${PROFILE} package-repository update 1 "disabled_pockets=backports"
-  maas ${PROFILE} package-repository update 1 "arches=amd64"
-  # Download ubuntu image from MAAS local mirror
-  if [[ "$LOCAL_REPOS" == "true" ]] ; then
-    maas ${PROFILE} boot-source-selections create 2 os="ubuntu" release="xenial" arches="amd64" subarches="*" labels="*"
-    echo "WARNING: Removing default MAAS stream:"
-    maas ${PROFILE} boot-source read 1
-    maas ${PROFILE} boot-source delete 1
-    maas ${PROFILE} boot-resources import
-    # TODO wait for finish,and stop import.
-  else
-    maas ${PROFILE} boot-source-selections create 1 os="ubuntu" release="xenial" arches="amd64" subarches="*" labels="*"
-    maas ${PROFILE} boot-resources import
-  fi
-  while [ ! -d /var/lib/maas/boot-resources/current/ubuntu/amd64/generic/xenial ]
-  do
-    sleep 10
-    echo "WARNING: Image is still not ready"
-  done
-}
-
-### Body
-echo "Preparing metadata model"
-mount /dev/cdrom /mnt/
-cp -rT /mnt/model/model /srv/salt/reclass
-chown -R root:root /srv/salt/reclass/*
-chown -R root:root /srv/salt/reclass/.git* || true
-chmod -R 644 /srv/salt/reclass/classes/cluster/* || true
-chmod -R 644 /srv/salt/reclass/classes/system/*  || true
-
-echo "Configuring salt"
-#service salt-master restart
-envsubst < /root/minion.conf > /etc/salt/minion.d/minion.conf
-service salt-minion restart
-while true; do
-    salt-key | grep "$SALT_MASTER_MINION_ID" && break
-    sleep 5
-done
-sleep 5
-for i in $(salt-key -l accepted | grep -v Accepted | grep -v "$SALT_MASTER_MINION_ID"); do
-    salt-key -d $i -y
-done
-
-find /var/lib/jenkins/jenkins.model.JenkinsLocationConfiguration.xml -type f -print0 | xargs -0 sed -i -e 's/10.167.4.15/'$SALT_MASTER_DEPLOY_IP'/g'
-
-echo "updating git repos"
-if [[ "$PIPELINES_FROM_ISO" == "true" ]] ; then
-  cp -r /mnt/mk-pipelines/* /home/repo/mk/mk-pipelines/
-  cp -r /mnt/pipeline-library/* /home/repo/mcp-ci/pipeline-library/
-  umount /dev/cdrom || true
-  chown -R git:www-data /home/repo/mk/mk-pipelines/*
-  chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*
-else
-  umount /dev/cdrom || true
-  git clone --mirror "${PIPELINE_REPO_URL}/mk-pipelines.git" /home/repo/mk/mk-pipelines/
-  git clone --mirror "${PIPELINE_REPO_URL}/pipeline-library.git" /home/repo/mcp-ci/pipeline-library/
-  chown -R git:www-data /home/repo/mk/mk-pipelines/*
-  chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*
-fi
-
-echo "installing formulas"
-_apt_cfg
-curl -s $MCP_SALT_REPO_KEY | sudo apt-key add -
-echo $MCP_SALT_REPO > /etc/apt/sources.list.d/mcp_salt.list
-apt-get update
-apt-get install -y $FORMULAS
-rm -rf /srv/salt/reclass/classes/service/*
-cd /srv/salt/reclass/classes/service/;ls /usr/share/salt-formulas/reclass/service/ -1 | xargs -I{} ln -s /usr/share/salt-formulas/reclass/service/{};cd /root
-
-salt-call saltutil.refresh_pillar
-salt-call saltutil.sync_all
-if ! $(reclass -n ${SALT_MASTER_MINION_ID} > /dev/null ) ; then
-  echo "ERROR: Reclass render failed!"
-  exit 1
-fi
-
-salt-call state.sls linux.network,linux,openssh,salt
-salt-call -t5 pkg.install salt-master,salt-minion
-sleep 5
-salt-call state.sls salt
-# Sometimes, maas can stuck :(
-salt-call state.sls maas.cluster,maas.region || salt-call state.sls maas.cluster,maas.region
-salt-call state.sls reclass,ntp
-
-_post_maas_cfg
-salt-call state.sls maas.cluster,maas.region || salt-call state.sls maas.cluster,maas.region
-
-ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts || true
-
-pillar=$(salt-call pillar.data jenkins:client)
-
-if [[ $pillar == *"job"* ]]; then
-  salt-call state.sls jenkins.client
-fi
-
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml
index fb217b2..d881a3c 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml
@@ -16,7 +16,14 @@
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
 
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
+
+- description: "Workaround: install cinder volume"
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:volume' state.sls cinder
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
 
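A note on the pattern above, which recurs in several templates in this change: the shared macro is now called with INSTALL_VOLUME=false, and the cinder volume role is converged by an explicit step instead. The -C 'I@cinder:volume' argument is Salt's compound matcher; I@ targets minions whose pillar contains the given key. A minimal sketch of the equivalent manual call, assuming a reachable Salt master:

    # Apply the cinder state only on nodes carrying cinder:volume pillar data;
    # the step above retries this twice with a 5-second delay.
    salt --hard-crash --state-output=mixed --state-verbose=False \
        -C 'I@cinder:volume' state.sls cinder
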
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-ocata.yaml
similarity index 98%
rename from tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml
rename to tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-ocata.yaml
index 8cb8de7..50da2d4 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-ocata.yaml
@@ -30,6 +30,7 @@
   backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
   bmk_enabled: 'False'
   ceph_enabled: 'False'
+  opencontrail_compute_iface: enp5s0f0
   openstack_nfv_dpdk_enabled: 'True'
   openstack_nfv_sriov_enabled: 'True'
   openstack_nfv_sriov_network: physnet1
@@ -153,7 +154,6 @@
   opencontrail_router01_hostname: rtr01
   opencontrail_router02_address: 10.167.8.101
   opencontrail_router02_hostname: rtr02
-  openldap_enabled: 'False'
   openssh_groups: ''
   openstack_benchmark_node01_address: 10.167.8.95
   openstack_benchmark_node01_hostname: bmk01
@@ -190,9 +190,6 @@
   openstack_network_engine: opencontrail
   openstack_neutron_bgp_vpn: 'False'
   openstack_neutron_bgp_vpn_driver: bagpipe
-  openstack_nfv_dpdk_enabled: 'False'
-  openstack_nfv_sriov_enabled: 'False'
-  openstack_nova_compute_nfv_req_enabled: 'False'
   openstack_nova_compute_reserved_host_memory_mb: '900'
   openstack_proxy_address: 10.167.8.80
   openstack_proxy_hostname: prx
@@ -201,7 +198,7 @@
   openstack_proxy_node02_address: 10.167.8.82
   openstack_proxy_node02_hostname: prx02
   openstack_upgrade_node01_address: 10.167.8.19
-  openstack_version: pike
+  openstack_version: ocata
   oss_enabled: 'False'
   oss_node03_address: ${_param:stacklight_monitor_node03_address}
   oss_webhook_app_id: '24'
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
index 1ce4082..f8a1d9a 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
@@ -8,7 +8,7 @@
 # Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
 {% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-contrail40-nfv') %}
 # Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
+{%- set CLUSTER_CONTEXT_NAME = os_env('CLUSTER_CONTEXT_NAME', 'salt-context-cookiecutter-contrail.yaml') %}
 {%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
 {%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
 {%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
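Since CLUSTER_CONTEXT_NAME is now read through os_env(), the cookiecutter context can be switched per run from the environment, falling back to salt-context-cookiecutter-contrail.yaml when unset. For example, to select the renamed ocata context before launching a run (illustrative shell usage):

    # Override the default context file for this deployment run:
    export CLUSTER_CONTEXT_NAME=salt-context-cookiecutter-contrail-ocata.yaml
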
diff --git a/tcp_tests/templates/cookied-bm-contrail40/master_config.sh b/tcp_tests/templates/cookied-bm-contrail40/master_config.sh
deleted file mode 100644
index a250862..0000000
--- a/tcp_tests/templates/cookied-bm-contrail40/master_config.sh
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/bin/bash -xe
-
-export SALT_MASTER_DEPLOY_IP=${SALT_MASTER_DEPLOY_IP:-"172.16.49.66"}
-export SALT_MASTER_MINION_ID=${SALT_MASTER_MINION_ID:-"cfg01.cookied-bm-contrail40.local"}
-export DEPLOY_NETWORK_GW=${DEPLOY_NETWORK_GW:-"172.16.49.65"}
-export DEPLOY_NETWORK_NETMASK=${DEPLOY_NETWORK_NETMASK:-"255.255.255.192"}
-export DNS_SERVERS=${DNS_SERVERS:-"172.18.208.44"}
-export http_proxy=${http_proxy:-""}
-export https_proxy=${https_proxy:-""}
-export PIPELINES_FROM_ISO=${PIPELINES_FROM_ISO:-"false"}
-export PIPELINE_REPO_URL=${PIPELINE_REPO_URL:-"https://github.com/Mirantis"}
-export MCP_VERSION=${MCP_VERSION:-"proposed"}
-export MCP_SALT_REPO_KEY=${MCP_SALT_REPO_KEY:-"http://apt.mirantis.com/public.gpg"}
-export MCP_SALT_REPO_URL=${MCP_SALT_REPO_URL:-"http://apt.mirantis.com/xenial"}
-export MCP_SALT_REPO="deb [arch=amd64] $MCP_SALT_REPO_URL $MCP_VERSION salt"
-export FORMULAS="salt-formula-*"
-# Not avaible in 2018.4 and pre.
-export LOCAL_REPOS=false
-#for cloning from aptly image use port 8088
-#export PIPELINE_REPO_URL=http://172.16.47.182:8088
-
-function _apt_cfg(){
-  # TODO remove those function after 2018.4 release
-  echo "Acquire::CompressionTypes::Order gz;" >/etc/apt/apt.conf.d/99compression-workaround-salt
-  echo "Acquire::EnableSrvRecords false;" >/etc/apt/apt.conf.d/99enablesrvrecords-false
-  echo "Acquire::http::Pipeline-Depth 0;" > /etc/apt/apt.conf.d/99aws-s3-mirrors-workaround-salt
-  echo "APT::Install-Recommends false;" > /etc/apt/apt.conf.d/99dont_install_recommends-salt
-  echo "APT::Install-Suggests false;" > /etc/apt/apt.conf.d/99dont_install_suggests-salt
-  echo "Acquire::Languages none;" > /etc/apt/apt.conf.d/99dont_acquire_all_languages-salt
-  echo "APT::Periodic::Update-Package-Lists 0;" > /etc/apt/apt.conf.d/99dont_update_package_list-salt
-  echo "APT::Periodic::Download-Upgradeable-Packages 0;" > /etc/apt/apt.conf.d/99dont_update_download_upg_packages-salt
-  echo "APT::Periodic::Unattended-Upgrade 0;" > /etc/apt/apt.conf.d/99disable_unattended_upgrade-salt
-  echo "INFO: cleaning sources lists"
-  rm -rv /etc/apt/sources.list.d/* || true
-  echo > /etc/apt/sources.list  || true
-}
-
-function _post_maas_cfg(){
-  local PROFILE=mirantis
-  # TODO: remove those check, and use only new version, adfter 2018.4 release
-  if [[ -f /var/lib/maas/.maas_login.sh ]]; then
-    /var/lib/maas/.maas_login.sh
-  else
-    echo "WARNING: Attempt to use old maas login schema.."
-    TOKEN=$(cat /var/lib/maas/.maas_credentials);
-    maas list | cut -d' ' -f1 | xargs -I{} maas logout {}
-    maas login $PROFILE http://127.0.0.1:5240/MAAS/api/2.0/ "${TOKEN}"
-  fi
-  # disable backports for maas enlist pkg repo
-  maas ${PROFILE} package-repository update 1 "disabled_pockets=backports"
-  maas ${PROFILE} package-repository update 1 "arches=amd64"
-  # Download ubuntu image from MAAS local mirror
-  if [[ "$LOCAL_REPOS" == "true" ]] ; then
-    maas ${PROFILE} boot-source-selections create 2 os="ubuntu" release="xenial" arches="amd64" subarches="*" labels="*"
-    echo "WARNING: Removing default MAAS stream:"
-    maas ${PROFILE} boot-source read 1
-    maas ${PROFILE} boot-source delete 1
-    maas ${PROFILE} boot-resources import
-    # TODO wait for finish,and stop import.
-  else
-    maas ${PROFILE} boot-source-selections create 1 os="ubuntu" release="xenial" arches="amd64" subarches="*" labels="*"
-    maas ${PROFILE} boot-resources import
-  fi
-  while [ ! -d /var/lib/maas/boot-resources/current/ubuntu/amd64/generic/xenial ]
-  do
-    sleep 10
-    echo "WARNING: Image is still not ready"
-  done
-}
-
-### Body
-echo "Preparing metadata model"
-mount /dev/cdrom /mnt/
-cp -rT /mnt/model/model /srv/salt/reclass
-chown -R root:root /srv/salt/reclass/*
-chown -R root:root /srv/salt/reclass/.git* || true
-chmod -R 644 /srv/salt/reclass/classes/cluster/* || true
-chmod -R 644 /srv/salt/reclass/classes/system/*  || true
-
-echo "Configuring salt"
-#service salt-master restart
-envsubst < /root/minion.conf > /etc/salt/minion.d/minion.conf
-service salt-minion restart
-while true; do
-    salt-key | grep "$SALT_MASTER_MINION_ID" && break
-    sleep 5
-done
-sleep 5
-for i in $(salt-key -l accepted | grep -v Accepted | grep -v "$SALT_MASTER_MINION_ID"); do
-    salt-key -d $i -y
-done
-
-find /var/lib/jenkins/jenkins.model.JenkinsLocationConfiguration.xml -type f -print0 | xargs -0 sed -i -e 's/10.167.4.15/'$SALT_MASTER_DEPLOY_IP'/g'
-
-echo "updating git repos"
-if [[ "$PIPELINES_FROM_ISO" == "true" ]] ; then
-  cp -r /mnt/mk-pipelines/* /home/repo/mk/mk-pipelines/
-  cp -r /mnt/pipeline-library/* /home/repo/mcp-ci/pipeline-library/
-  umount /dev/cdrom || true
-  chown -R git:www-data /home/repo/mk/mk-pipelines/*
-  chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*
-else
-  umount /dev/cdrom || true
-  git clone --mirror "${PIPELINE_REPO_URL}/mk-pipelines.git" /home/repo/mk/mk-pipelines/
-  git clone --mirror "${PIPELINE_REPO_URL}/pipeline-library.git" /home/repo/mcp-ci/pipeline-library/
-  chown -R git:www-data /home/repo/mk/mk-pipelines/*
-  chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*
-fi
-
-echo "installing formulas"
-_apt_cfg
-curl -s $MCP_SALT_REPO_KEY | sudo apt-key add -
-echo $MCP_SALT_REPO > /etc/apt/sources.list.d/mcp_salt.list
-apt-get update
-apt-get install -y $FORMULAS
-rm -rf /srv/salt/reclass/classes/service/*
-cd /srv/salt/reclass/classes/service/;ls /usr/share/salt-formulas/reclass/service/ -1 | xargs -I{} ln -s /usr/share/salt-formulas/reclass/service/{};cd /root
-
-salt-call saltutil.refresh_pillar
-salt-call saltutil.sync_all
-if ! $(reclass -n ${SALT_MASTER_MINION_ID} > /dev/null ) ; then
-  echo "ERROR: Reclass render failed!"
-  exit 1
-fi
-
-salt-call state.sls linux.network,linux,openssh,salt
-salt-call -t5 pkg.install salt-master,salt-minion
-sleep 5
-salt-call state.sls salt
-# Sometimes, maas can stuck :(
-salt-call state.sls maas.cluster,maas.region || salt-call state.sls maas.cluster,maas.region
-salt-call state.sls reclass,ntp
-
-_post_maas_cfg
-salt-call state.sls maas.cluster,maas.region || salt-call state.sls maas.cluster,maas.region
-
-ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts || true
-
-pillar=$(salt-call pillar.data jenkins:client)
-
-if [[ $pillar == *"job"* ]]; then
-  salt-call state.sls jenkins.client
-fi
-
diff --git a/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml b/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml
index 1bd831a..511fb1f 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml
@@ -16,7 +16,14 @@
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
 
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
+
+- description: "Workaround: install cinder volume"
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:volume' state.sls cinder
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
 
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml
similarity index 95%
copy from tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml
copy to tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml
index 8cb8de7..c4470e6 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml
@@ -30,15 +30,6 @@
   backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
   bmk_enabled: 'False'
   ceph_enabled: 'False'
-  openstack_nfv_dpdk_enabled: 'True'
-  openstack_nfv_sriov_enabled: 'True'
-  openstack_nfv_sriov_network: physnet1
-  openstack_nfv_sriov_numvfs: '7'
-  openstack_nfv_sriov_pf_nic: enp5s0f1
-  openstack_nova_compute_hugepages_count: '16'
-  openstack_nova_compute_nfv_req_enabled: 'True'
-  openstack_nova_cpu_pinning: 6,7,8,9,10,11
-
   cicd_control_node01_address: 10.167.8.91
   cicd_control_node01_hostname: cid01
   cicd_control_node02_address: 10.167.8.92
@@ -91,7 +82,7 @@
   control_vlan: '2422'
   cookiecutter_template_branch: ''
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.16.49.65
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: 172.16.49.64/26
@@ -201,7 +192,7 @@
   openstack_proxy_node02_address: 10.167.8.82
   openstack_proxy_node02_hostname: prx02
   openstack_upgrade_node01_address: 10.167.8.19
-  openstack_version: pike
+  openstack_version: ocata
   oss_enabled: 'False'
   oss_node03_address: ${_param:stacklight_monitor_node03_address}
   oss_webhook_app_id: '24'
@@ -216,7 +207,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 172.16.49.66
   shared_reclass_branch: ''
-  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.8.60
   stacklight_log_hostname: log
@@ -226,7 +217,7 @@
   stacklight_log_node02_hostname: log02
   stacklight_log_node03_address: 10.167.8.63
   stacklight_log_node03_hostname: log03
-  stacklight_long_term_storage_type: influxdb
+  stacklight_long_term_storage_type: prometheus
   stacklight_monitor_address: 10.167.8.70
   stacklight_monitor_hostname: mon
   stacklight_monitor_node01_address: 10.167.8.71
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
index 81c3307..9319634 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
@@ -8,7 +8,7 @@
 # Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
 {% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-contrail40') %}
 # Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
+{%- set CLUSTER_CONTEXT_NAME = os_env('CLUSTER_CONTEXT_NAME', 'salt-context-cookiecutter-contrail.yaml') %}
 {%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
 {%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
 {%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
index 089a255..cc658ba 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
@@ -15,7 +15,14 @@
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
 
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
+
+- description: "Workaround: install cinder volume"
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:volume' state.sls cinder
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
 
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml
index 37082f6..5ba2e3f 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml
@@ -1,4 +1,5 @@
 {% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
 
 # Install docker swarm
 - description: Configure docker service
@@ -65,36 +66,31 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+- description: Install glusterfs client on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:client' state.sls glusterfs.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
 # Install slv2 infra
 #Launch containers
 - description: Install Mongo if target matches
   cmd: |
     if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Configure Alerta if it is exists
+- description: Configure Mongo cluster if target matches
   cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
     fi
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: launch prometheus containers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Check docker ps
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
+  retry: {count: 5, delay: 20}
   skip_fail: false
 
 - description: Install telegraf
@@ -113,19 +109,31 @@
   skip_fail: false
 
 - description: Install elasticsearch server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true and *01*' state.sls elasticsearch.server
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install elasticsearch server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true' state.sls elasticsearch.server
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true and *01*' state.sls kibana.server
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install kibana server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true' state.sls kibana.server
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 30}
   skip_fail: false
@@ -141,23 +149,12 @@
     INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
     echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
     if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server and *01*' state.sls influxdb;
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb;
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
-
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
-  cmd: |
-    PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
-    if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 # Install service for the log collection
 - description: Configure fluentd
@@ -186,31 +183,23 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-# Collect grains needed to configure the services
+  ######################################
+  ######################################
+  ######################################
 
-- description: Get grains
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+- description: Collect Grains
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Sync modules
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Update mine
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 15}
-  skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
+- description: Configure prometheus in docker swarm
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 2, delay: 10}
   skip_fail: false
 
 - description: Configure Remote Collector in Docker Swarm for Openstack deployments
@@ -219,39 +208,38 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Install sphinx
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-
-#- description: run docker state
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-#
-#- description: docker ps
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
-  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+- description: launch prometheus containers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
-- description: Run salt minion to create cert files
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+- description: Check docker ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 60}
+  skip_fail: false
+
+- description: Configure Alerta if it exists
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Run salt minion to create cert files
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion.cert
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+{{  SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{  SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
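The elasticsearch and kibana steps above follow a bootstrap-then-converge pattern: each state is applied first on the *01* node alone, then on every enabled server, presumably so the first node can form the cluster before the others join. Stripped of the test-step wrapper, the sequence looks like:

    # Phase 1: bootstrap the cluster on the first node only.
    salt -C 'I@elasticsearch:server:enabled:true and *01*' state.sls elasticsearch.server
    # Phase 2: converge the remaining members.
    salt -C 'I@elasticsearch:server:enabled:true' state.sls elasticsearch.server
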
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
index 013fb29..c6a9449 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
@@ -15,7 +15,14 @@
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
 
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
+
+- description: "Workaround: install cinder volume"
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:volume' state.sls cinder
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
 
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
index 925c795..0a3867a 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
@@ -1,4 +1,5 @@
 {% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
 
 # Install docker swarm
 - description: Configure docker service
@@ -65,36 +66,31 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+- description: Install glusterfs client on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:client' state.sls glusterfs.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
 # Install slv2 infra
 #Launch containers
 - description: Install Mongo if target matches
   cmd: |
     if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Configure Alerta if it is exists
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: launch prometheus containers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+- description: Configure Mongo cluster if target matches
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
+    fi
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Check docker ps
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
+  retry: {count: 5, delay: 20}
   skip_fail: false
 
 - description: Install telegraf
@@ -112,29 +108,32 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Configure Prometheus exporters, if pillar 'prometheus:collector' exists on any server
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' match.pillar 'prometheus:collector' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' state.sls prometheus.collector
-    fi
+- description: Install elasticsearch server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true and *01*' state.sls elasticsearch.server
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true' state.sls elasticsearch.server
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true and *01*' state.sls kibana.server
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install kibana server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true' state.sls kibana.server
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 30}
   skip_fail: false
@@ -150,23 +149,12 @@
     INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
     echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
     if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server and *01*' state.sls influxdb;
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb;
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
-
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
-  cmd: |
-    PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
-    if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 # Install service for the log collection
 - description: Configure fluentd
@@ -195,31 +183,23 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-# Collect grains needed to configure the services
+  ######################################
+  ######################################
+  ######################################
 
-- description: Get grains
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+- description: Collect Grains
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Sync modules
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Update mine
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 15}
-  skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
+- description: Configure prometheus in docker swarm
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 2, delay: 10}
   skip_fail: false
 
 - description: Configure Remote Collector in Docker Swarm for Openstack deployments
@@ -228,39 +208,38 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Install sphinx
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-
-#- description: run docker state
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-#
-#- description: docker ps
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
-  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+- description: launch prometheus containers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
-- description: Run salt minion to create cert files
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+- description: Check docker ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 15}
+  retry: {count: 1, delay: 10}
   skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 60}
+  skip_fail: false
+
+- description: Configure Alerta if it exists
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Run salt minion to create cert files
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion.cert
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+{{  SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{  SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
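The Mongo installation above is likewise split in two: mongodb.server converges the daemons first, and mongodb.cluster then assembles the replica set, with generous retries since cluster formation can only succeed once every member is up. Both steps guard on a pillar match, sketched here without the wrapper:

    # Act only if some minion actually carries mongodb:server pillar data.
    if salt -C 'I@mongodb:server' match.pillar 'mongodb:server'; then
        salt -C 'I@mongodb:server' state.sls mongodb.server
        salt -C 'I@mongodb:server' state.sls mongodb.cluster
    fi
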
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml
index d559d73..308051a 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml
@@ -1,4 +1,17 @@
 {% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CTL01 with context %}
+
+{%- macro MACRO_CHECK_SYSTEMCTL() %}
+{#######################################}
+- description: Check systemctl on compute
+  cmd: |
+    set -ex;
+    salt 'cmp*' cmd.run "systemctl --version";
+    salt 'cmp*' cmd.run "journalctl -u dbus";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: true
+{%- endmacro %}
 
 - description:  Install keepalived on primary controller
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -25,21 +38,86 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'I@etcd:server' state.sls etcd.server.service
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 3, delay: 30}
   skip_fail: false
 
 - description: Install certs
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@etcd:server' salt.minion -b 1
+    -C 'I@etcd:server' state.sls salt.minion -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
 
-- description: Install etcd
+# Install opencontrail database services
+- description: Install opencontrail database services for 01
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@etcd:server' state.sls etcd.server.service
+    -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Install opencontrail database services
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database' state.sls opencontrail.database
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+# Install opencontrail control services
+- description: Install opencontrail services for 01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Install opencontrail services
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Create cert files on Kubernetes master nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@kubernetes:master' state.sls salt.minion.cert
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Install docker host
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@docker:host' state.sls docker.host
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+  #- description: Configure OpenContrail as an add-on for Kubernetes
+  #  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+  #    -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
+  #  node_name: {{ HOSTNAME_CFG01 }}
+  #  retry: {count: 1, delay: 5}
+  #  skip_fail: false
+
+- description: Install Kubernetes components
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@kubernetes:master' state.sls kubernetes.pool
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: "Run k8s master at *01* to simplify namespace creation"
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@kubernetes:master and *01*' state.sls kubernetes.master exclude=kubernetes.master.setup,kubernetes.master.kube-addons
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
+- description: Run k8s without master.setup
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup,kubernetes.master.kube-addons
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
   skip_fail: false
 
 - description: Check the etcd health
@@ -49,51 +127,88 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Install Kubernetes Addons
+{{ MACRO_CHECK_SYSTEMCTL() }}
+
+- description: Run Kubernetes master setup
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
+     -C 'I@kubernetes:master and *01*' state.sls kubernetes.master.setup
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Install Kubernetes components
+- description: Restart Kubelet
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:pool' state.sls kubernetes.pool
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 60}
-  skip_fail: false
-
-# Opencontrail Control Plane
-
-- description: Create configuration files for OpenContrail
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+    -C 'I@kubernetes:master' service.restart 'kubelet'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Configure OpenContrail as an add-on for Kubernetes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
-  node_name: {{ HOSTNAME_CFG01 }}
+- description: Wait for contrail containers to come up. The opencontrail.client state should be run only after that
+  cmd: |
+    sleep 30;
+    total_pods=`kubectl get pods --all-namespaces | awk '/opencontrail/ {print $3}' | cut -d "/" -f2`
+    for i in `seq 1 10`; do
+      ready_pods=`kubectl get pods --all-namespaces | awk '/opencontrail/ {print $3}' | cut -d "/" -f1`
+      if [ "$ready_pods" == "$total_pods" ];then
+        echo "containers are ready. Going to the next step"
+        break
+      elif [ "$i" -ne "10" ]; then
+        echo "Opencontrail containers are not ready yet: $ready_pods of $total_pods ready."
+        sleep 60
+        continue
+      else
+        echo "Failed to bring up contrail containers within 10 minutes"
+        exit 1
+      fi
+    done
+  node_name: {{ HOSTNAME_CTL01 }}
   retry: {count: 1, delay: 5}
   skip_fail: true
 
-- description: Verify the status of the OpenContrail service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
+- description: Check all pods
+  cmd: |
+     salt 'ctl*' cmd.run "kubectl get pods -o wide --all-namespaces";
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
+# Install contrail computes
 - description: Set up the OpenContrail resources
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
   node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 60}
+  skip_fail: false
+
+- description: Apply opencontrail (all but client) on contrail computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Reboot contrail computes
+  cmd: |
+    salt --async -C 'I@opencontrail:compute' system.reboot;
+    sleep 450;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
+
+- description: Apply opencontrail.client on contrail computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' state.sls opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Apply the full opencontrail state on contrail computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' state.sls opencontrail
+  node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-# OpenContrail vrouters
 - description: Refresh pillars on cmp*
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'cmp*' saltutil.refresh_pillar
@@ -108,56 +223,56 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Apply highstate on contrail computes
+- description: Create cert files on Kubernetes master nodes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:compute' state.highstate exclude=opencontrail.client
+    -C 'I@kubernetes:master' state.sls salt.minion.cert
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 2, delay: 30}
   skip_fail: false
 
-- description: Reboot contrail computes
-  cmd: salt --timeout=600 -C 'I@opencontrail:compute' system.reboot
+- description: Install Kubernetes components
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@kubernetes:pool and not I@kubernetes:master' state.sls kubernetes.pool
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
-  skip_fail: true
-
-- description: Apply opencontrail.client on contrail computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@opencontrail:compute' state.sls opencontrail.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Run Kubernetes master without setup
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: true
-
-- description: Run Kubernetes master setup
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@kubernetes:master' --subset 1 state.sls kubernetes.master.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
 - description: Restart Kubelet
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:pool' service.restart 'kubelet'
+    -C 'I@kubernetes:pool and not I@kubernetes:master' service.restart 'kubelet'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
-- description: Check nodes registrations
+- description: Configure OpenContrail as an add-on for Kubernetes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:pool' cmd.run 'sleep 60; kubectl get nodes'
+    -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 - description: Renew hosts file on a whole cluster
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '*' state.sls linux.network.host;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
+
+- description: Final check of all pods
+  cmd: |
+     sleep 60;
+     salt 'ctl*' cmd.run "kubectl -o wide get pods --all-namespaces";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
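+# (Sketch) to surface only the pods that are not yet Running, the same output
+# can be filtered; STATUS is the fourth column of "kubectl get pods":
+#   kubectl get pods --all-namespaces | awk 'NR>1 && $4!="Running"'
+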
+- description: Check contrail status on all pods
+  cmd: |
+     pods=`kubectl get pods --all-namespaces | awk '/opencontrail/ {print $2}'`
+     for i in $pods; do
+       kubectl exec $i -c opencontrail-controller -n kube-system contrail-status;
+       kubectl exec $i -c opencontrail-analytics -n kube-system contrail-status;
+       kubectl exec $i -c opencontrail-analyticsdb -n kube-system contrail-status;
+     done
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
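+# (Sketch) contrail-status prints one "<service>: <state>" line per service;
+# a stricter gate could flag any service whose state is not "active", e.g.:
+#   ... contrail-status | awk -F': ' 'NF==2 && $2!="active"'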
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml
index c5648a8..9cf1366 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml
@@ -18,7 +18,7 @@
         enp9s0f0:
           role: single_mgm
         enp9s0f1:
-          role: single_ctl
+          role: single_vlan_ctl
 
     kvm02.bm-mcp-pike-k8s-contrail.local:
       reclass_storage_name: infra_kvm_node02
@@ -29,7 +29,7 @@
         enp9s0f0:
           role: single_mgm
         enp9s0f1:
-          role: single_ctl
+          role: single_vlan_ctl
 
     kvm03.bm-mcp-pike-k8s-contrail.local:
       reclass_storage_name: infra_kvm_node03
@@ -40,7 +40,7 @@
         enp9s0f0:
           role: single_mgm
         enp9s0f1:
-          role: single_ctl
+          role: single_vlan_ctl
 
     ctl01.bm-mcp-pike-k8s-contrail.local:
       reclass_storage_name: kubernetes_control_node01
@@ -52,7 +52,7 @@
           role: single_mgm
           deploy_address: 172.17.41.9
         enp2s0f1:
-          role: single_ctl
+          role: single_vlan_ctl
           single_address: 10.167.8.239
 
     ctl02.bm-mcp-pike-k8s-contrail.local:
@@ -65,7 +65,7 @@
           role: single_mgm
           deploy_address: 172.17.41.10
         enp2s0f1:
-          role: single_ctl
+          role: single_vlan_ctl
           single_address: 10.167.8.238
 
     ctl03.bm-mcp-pike-k8s-contrail.local:
@@ -78,41 +78,21 @@
           role: single_mgm
           deploy_address: 172.17.41.11
         enp2s0f1:
-          role: single_ctl
+          role: single_vlan_ctl
           single_address: 10.167.8.237
 
-    # prx01.bm-mcp-pike-k8s-contrail.local:
-    #   reclass_storage_name: kubernetes_proxy_node01
-    #   roles:
-    #   - kubernetes_proxy
-    #   # - infra_proxy
-    #   # - stacklight_proxy
-    #   - salt_master_host
-    #   - linux_system_codename_xenial
-    #   interfaces:
-    #     enp9s0f0:
-    #       role: single_mgm
-    #       deploy_address: 172.17.41.8
-    #     enp9s0f1:
-    #       role: single_ctl
-    #       single_address: 10.167.8.81
-
     cmp001.bm-mcp-pike-k8s-contrail.local:
       reclass_storage_name: kubernetes_compute_node001
       roles:
       - linux_system_codename_xenial
       - kubernetes_compute_contrail
       - salt_master_host
-        #- features_lvm_backend
       interfaces:
         enp9s0f0:
           role: single_dhcp
-        ens11f0:
-          role: bond0_ab_contrail
-          tenant_address: 192.168.0.101
         ens11f1:
-          role: single_ctl
-          single_address: 10.167.8.101
+          role: k8s_oc40_only_vhost_on_control_vlan
+          single_address: 10.167.8.103
 
     cmp002.bm-mcp-pike-k8s-contrail.local:
       reclass_storage_name: kubernetes_compute_node002
@@ -120,30 +100,9 @@
       - linux_system_codename_xenial
       - kubernetes_compute_contrail
       - salt_master_host
-        #- features_lvm_backend
       interfaces:
         enp9s0f0:
           role: single_dhcp
-        ens11f0:
-          role: bond0_ab_contrail
-          tenant_address: 192.168.0.102
         ens11f1:
-          role: single_ctl
-          single_address: 10.167.8.102
-
-    # cmp002.bm-mcp-pike-k8s-contrail.local:
-    #   reclass_storage_name: kubernetes_compute_node02
-    #   roles:
-    #   - features_lvm_backend
-    #   - linux_system_codename_xenial
-    #   - kubernetes_compute_contrail
-    #   interfaces:
-    #     enp2s0f1:
-    #       role: single_mgm
-    #       deploy_address: 172.16.49.74
-    #     enp5s0f0:
-    #       role: bond0_ab_contrail
-    #       tenant_address: 192.168.0.102
-    #     enp5s0f1:
-    #       role: single_vlan_ctl
-    #       single_address: 10.167.8.102
+          role: k8s_oc40_only_vhost_on_control_vlan
+          single_address: 10.167.8.104
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
index 49e3ddb..6f390af 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
@@ -32,6 +32,8 @@
   bmk_enabled: 'False'
   ceph_enabled: 'False'
   auditd_enabled: 'False'
+  kubernetes_coredns_enabled: False
+  kubernetes_kubedns_enabled: True
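+  # note: only one DNS add-on is expected to be enabled at a time; kubedns is
+  # used instead of coredns for this deployment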
   cicd_control_node01_address: 10.167.8.91
   cicd_control_node01_hostname: cid01
   cicd_control_node02_address: 10.167.8.92
@@ -85,7 +87,7 @@
   cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.17.41.2
   deploy_network_netmask: 255.255.255.192
-  deploy_network_subnet: 172.16.49.64/26
+  deploy_network_subnet: 172.17.41.0/26
   deployment_type: physical
   dns_server01: 172.17.41.2
   dns_server02: 172.17.41.2
@@ -108,7 +110,7 @@
   kubernetes_enabled: 'True'
   kubernetes_compute_count: 2
   kubernetes_compute_rack01_single_subnet: 10.167.8
-  kubernetes_compute_rack01_tenant_subnet: 192.168.0
+  kubernetes_compute_rack01_tenant_subnet: 10.167.8
   kubernetes_network_opencontrail_enabled: 'True'
   local_repositories: 'False'
   maas_deploy_address: 172.16.49.66
@@ -131,15 +133,16 @@
   kubernetes_control_node03_address: 10.167.8.237
   kubernetes_control_node03_hostname: ctl03
   linux_repo_contrail_component: oc40
-  opencontrail_analytics_address: 10.167.8.30
   opencontrail_analytics_hostname: ctl
-  opencontrail_analytics_node01_address: 10.167.8.31
   opencontrail_analytics_node01_hostname: ctl01
-  opencontrail_analytics_node02_address: 10.167.8.32
   opencontrail_analytics_node02_hostname: ctl02
-  opencontrail_analytics_node03_address: 10.167.8.33
   opencontrail_analytics_node03_hostname: ctl03
+  opencontrail_analytics_address: ${_param:opencontrail_control_address}
+  opencontrail_analytics_node01_address: ${_param:opencontrail_control_node01_address}
+  opencontrail_analytics_node02_address: ${_param:opencontrail_control_node02_address}
+  opencontrail_analytics_node03_address: ${_param:opencontrail_control_node03_address}
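+  # the analytics addresses above are collapsed onto the control nodes by
+  # reusing the control addresses through reclass ${_param:...} interpolation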
   opencontrail_compute_iface_mask: '24'
+  opencontrail_compute_iface: ens11f1
   opencontrail_control_address: 10.167.8.236
   opencontrail_control_hostname: ctl
   opencontrail_control_node01_address: 10.167.8.239
@@ -153,6 +156,7 @@
   opencontrail_router01_hostname: rtr01
   opencontrail_router02_address: 10.167.8.101
   opencontrail_router02_hostname: rtr02
+  opencontrail_public_ip_range: 172.17.41.128/26
   opencontrail_version: '4.0'
   openstack_enabled: 'False'
   openssh_groups: ''
@@ -204,10 +208,10 @@
   stacklight_telemetry_node03_hostname: mtr03
   stacklight_version: '2'
   static_ips_on_deploy_network_enabled: 'False'
-  tenant_network_gateway: 192.168.0.1
+  tenant_network_gateway: 10.167.8.1
   tenant_network_netmask: 255.255.255.0
-  tenant_network_subnet: 192.168.0.0/24
-  tenant_vlan: '2411'
+  tenant_network_subnet: 10.167.8.0/24
+  tenant_vlan: '2410'
   upstream_proxy_enabled: 'False'
   use_default_network_scheme: 'True'
   vnf_onboarding_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml
index 18032a1..47e12c8 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml
@@ -1,58 +1,4 @@
 nodes:
-    # Virtual Control Plane nodes
-# commented as ctl is bm
-#    ctl01.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: kubernetes_control_node01
-#      roles:
-#      - kubernetes_control_contrail
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#
-#    ctl02.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: kubernetes_control_node02
-#      roles:
-#      - kubernetes_control_contrail
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#
-#    ctl03.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: kubernetes_control_node03
-#      roles:
-#      - kubernetes_control_contrail
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-
-#    commented as there is no k8s proxy nodes in this setup
-#    prx01.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: kubernetes_proxy_node01
-#      roles:
-#      - kubernetes_proxy
-#      # - infra_proxy
-#      # - stacklight_proxy
-#      - salt_master_host
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-
-#    prx02.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: kubernetes_proxy_node02
-#      roles:
-#      - kubernetes_proxy
-#      # - infra_proxy
-#      # - stacklight_proxy
-#      - salt_master_host
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-
     mon01.cookied-bm-mcp-ocata-contrail.local:
       reclass_storage_name: stacklight_server_node01
       roles:
@@ -80,73 +26,6 @@
         ens3:
           role: single_ctl
 
-#   commented as shpuld be in pod
-#   nal01.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: opencontrail_analytics_node01
-#      roles:
-#      - opencontrail_analytics
-#      - linux_system_codename_xenial
-#      - salt_master_host
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#          single_address: 10.167.8.31
-#
-#    nal02.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: opencontrail_analytics_node02
-#      roles:
-#      - opencontrail_analytics
-#      - linux_system_codename_xenial
-#      - salt_master_host
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#          single_address: 10.167.8.32
-#
-#    nal03.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: opencontrail_analytics_node03
-#      roles:
-#      - opencontrail_analytics
-#      - linux_system_codename_xenial
-#      - salt_master_host
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#          single_address: 10.167.8.33
-#
-#    ntw01.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: opencontrail_control_node01
-#      roles:
-#      - opencontrail_control
-#      - linux_system_codename_xenial
-#      - salt_master_host
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#          single_address: 10.167.8.21
-#
-#    ntw02.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: opencontrail_control_node02
-#      roles:
-#      - opencontrail_control
-#      - linux_system_codename_xenial
-#      - salt_master_host
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#          single_address: 10.167.8.22
-#
-#    ntw03.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: opencontrail_control_node03
-#      roles:
-#      - opencontrail_control
-#      - linux_system_codename_xenial
-#      - salt_master_host
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#          single_address: 10.167.8.23
-
     mtr01.cookied-bm-mcp-ocata-contrail.local:
       reclass_storage_name: stacklight_telemetry_node01
       roles:
@@ -201,11 +80,29 @@
         ens3:
           role: single_ctl
 
-#    bmk01.cookied-bm-mcp-ocata-contrail.local:
-#      reclass_storage_name: openstack_benchmark_node01
-#      roles:
-#      - openstack_benchmark
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
+    cid01.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: cicd_control_node01
+      roles:
+      - cicd_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    cid02.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: cicd_control_node02
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    cid03.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: cicd_control_node03
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
index 3705053..7ebda02 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
@@ -21,7 +21,7 @@
 
 {{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
 
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN, CLUSTER_PRODUCT_MODELS='cicd infra kubernetes opencontrail stacklight2') }}
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
 
@@ -50,6 +50,13 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: Remove kubernetes proxy cluster class from kvm
+  cmd: |
+    sed -i 's/- system.salt.control.cluster.kubernetes_proxy_cluster//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
 - description: Temporary workaround for correct bridge name according to environment templates
   cmd: |
     sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
@@ -58,6 +65,24 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: "Excluding tenant network from cluster"
+  cmd: |
+    set -e;
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools add-key parameters._param.opencontrail_compute_address '${_param:single_address}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/opencontrail/compute.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "Use correct compute interface"
+  cmd: |
+    set -e;
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools add-key parameters._param.opencontrail_compute_iface 'ens11f1.${_param:control_vlan}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/opencontrail/init.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
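+# After the two reclass-tools calls above, the opencontrail class files should
+# carry (roughly) the following pillar overrides:
+#   parameters:
+#     _param:
+#       opencontrail_compute_address: ${_param:single_address}
+#       opencontrail_compute_iface: ens11f1.${_param:control_vlan}
+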
 - description: Rerun openssh after env model is generated
   cmd: |
     salt-call state.sls openssh
@@ -80,7 +105,7 @@
 
 - description: Update minion information
   cmd: |
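+    # sync_all (unlike the previous sync_grains) also distributes custom
+    # modules and states to the minions before the mine/pillar refresh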
-    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
     salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
     salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
index 0b559a8..9dcb4f6 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
@@ -1,4 +1,5 @@
 {% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
 
 # Install docker swarm
 - description: Configure docker service
@@ -65,36 +66,31 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+- description: Install glusterfs client on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:client' state.sls glusterfs.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
 # Install slv2 infra
 #Launch containers
 - description: Install Mongo if target matches
   cmd: |
     if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Configure Alerta if it is exists
+- description: Configure Mongo cluster if target matches
   cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
     fi
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: launch prometheus containers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Check docker ps
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
+  retry: {count: 5, delay: 20}
   skip_fail: false
 
 - description: Install telegraf
@@ -113,19 +109,31 @@
   skip_fail: false
 
 - description: Install elasticsearch server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true and *01*' state.sls elasticsearch.server
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install elasticsearch server on remaining nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true' state.sls elasticsearch.server
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
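+# The "*01* first, then everyone" pattern above bootstraps the service on one
+# node before the remaining members join; kibana and influxdb below follow
+# the same pattern.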
 
 - description: Install kibana server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true and *01*' state.sls kibana.server
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install kibana server on remaining nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true' state.sls kibana.server
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install elasticsearch client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 30}
   skip_fail: false
@@ -141,23 +149,12 @@
     INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
     echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
     if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server and *01*' state.sls influxdb;
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb;
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
-
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
-  cmd: |
-    PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
-    if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
+  skip_fail: false
 
 # Install service for the log collection
 - description: Configure fluentd
@@ -186,72 +183,85 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-# Collect grains needed to configure the services
+# Install Galera
 
-- description: Get grains
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+- description: Install Galera on first server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:master' state.sls galera
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on other servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:slave' state.sls galera -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check Galera on first server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:master' mysql.status
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Check Galera on other servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:slave' mysql.status
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
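+# (Sketch) a quick manual sanity check is that the reported cluster size
+# matches the node count, e.g.:
+#   salt -C 'I@galera:master' mysql.status | grep -A1 wsrep_cluster_size
+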
+- description: Collect grains, sync modules and update mine
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Sync modules
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Update mine
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 15}
-  skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
+- description: Configure prometheus services in docker swarm
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Configure Remote Collector in Docker Swarm for Openstack deployments
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install sphinx
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-
-#- description: run docker state
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-#
-#- description: docker ps
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
-  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
-  node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
-- description: Run salt minion to create cert files
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+- description: Launch prometheus containers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Check docker ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 60}
+  skip_fail: false
+
+- description: Configure Alerta if it exists
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Run salt minion to create cert files
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion.cert
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+{{  SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{  SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
index da6afea..7832675 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
@@ -179,96 +179,6 @@
 
         nodes:
 
-        #  - name: {{ HOSTNAME_CFG01 }}
-        #    role: salt_master
-        #    params:
-        #      ipmi_user: !os_env IPMI_USER
-        #      ipmi_password: !os_env IPMI_PASSWORD
-        #      ipmi_previlegies: OPERATOR
-        #      ipmi_host: !os_env IPMI_HOST_CFG01  # hostname or IP address
-        #      ipmi_lan_interface: lanplus
-        #      ipmi_port: 623
-
-        #      root_volume_name: system     # see 'volumes' below
-        #      cloud_init_volume_name: iso  # see 'volumes' below
-        #      cloud_init_iface_up: enp3s0f1  # see 'interfaces' below.
-        #      volumes:
-        #        - name: system
-        #          capacity: !os_env NODE_VOLUME_SIZE, 200
-
-        #          # The same as for agent URL, here is an URL to the image that should be
-        #          # used for deploy the node. It should also be accessible from deploying
-        #          # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
-        #          source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-        #          source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-        #        - name: iso  # Volume with name 'iso' will be used
-        #                     # for store image with cloud-init metadata.
-
-        #          cloudinit_meta_data: *cloudinit_meta_data
-        #          cloudinit_user_data: *cloudinit_user_data_cfg01
-
-        #      interfaces:
-        #        - label: enp3s0f0  # Infra interface
-        #          mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
-        #        - label: enp3s0f1
-        #          l2_network_device: admin
-        #          mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
-
-        #      network_config:
-        #        enp3s0f0:
-        #          networks:
-        #           - infra
-        #        enp3s0f1:
-        #          networks:
-        #           - admin
-          # - name: {{ HOSTNAME_PRX01 }}
-          #   role: salt_minion
-          #   params:
-          #     ipmi_user: !os_env IPMI_USER
-          #     ipmi_password: !os_env IPMI_PASSWORD
-          #     ipmi_previlegies: OPERATOR
-          #     ipmi_host: !os_env IPMI_HOST_PRX01  # hostname or IP address
-          #     ipmi_lan_interface: lanplus
-          #     ipmi_port: 623
-
-          #     root_volume_name: system     # see 'volumes' below
-          #     cloud_init_volume_name: iso  # see 'volumes' below
-          #     cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
-          #     volumes:
-          #       - name: system
-          #         capacity: !os_env NODE_VOLUME_SIZE, 200
-
-          #         # The same as for agent URL, here is an URL to the image that should be
-          #         # used for deploy the node. It should also be accessible from deploying
-          #         # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
-          #         source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-          #         source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-          #       - name: iso  # Volume with name 'iso' will be used
-          #                    # for store image with cloud-init metadata.
-
-          #         cloudinit_meta_data: *cloudinit_meta_data
-          #         cloudinit_user_data: *cloudinit_user_data
-
-          #     interfaces:
-          #       - label: enp9s0f0
-          #         l2_network_device: admin
-          #         mac_address: !os_env ETH0_MAC_ADDRESS_PRX01
-          #       - label: enp9s0f1
-          #         mac_address: !os_env ETH1_MAC_ADDRESS_PRX01
-
-          #     network_config:
-          #       enp9s0f0:
-          #         networks:
-          #          - admin
-          #       bond0:
-          #         networks:
-          #          - control
-          #         aggregation: active-backup
-          #         parents:
-          #          - enp9s0f1
-
           - name: {{ HOSTNAME_KVM01 }}
             role: salt_minion
             params:
@@ -415,7 +325,7 @@
                    - enp9s0f1
 
           - name: {{ HOSTNAME_CTL01 }}
-            role: salt_minion
+            role: k8s_controller
             params:
               ipmi_user: !os_env IPMI_USER
               ipmi_password: !os_env IPMI_PASSWORD
@@ -591,12 +501,6 @@
                 enp9s0f0:
                   networks:
                    - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp9s0f1
 
           - name: {{ HOSTNAME_CMP002 }}
             role: salt_minion
@@ -638,9 +542,3 @@
                 enp9s0f0:
                   networks:
                    - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp9s0f1
diff --git a/tcp_tests/templates/k8s-ha-calico/k8s.yaml b/tcp_tests/templates/k8s-ha-calico/k8s.yaml
index a3f228e..88075db 100644
--- a/tcp_tests/templates/k8s-ha-calico/k8s.yaml
+++ b/tcp_tests/templates/k8s-ha-calico/k8s.yaml
@@ -45,7 +45,7 @@
 
 - description: Register addons
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@kubernetes:master and *01*' state.sls kubernetes.master.setup
+    -C 'I@kubernetes:master' state.sls kubernetes.master.setup
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/tests/system/test_install_opencontrail.py b/tcp_tests/tests/system/test_install_opencontrail.py
index efc37c8..4bd6183 100644
--- a/tcp_tests/tests/system/test_install_opencontrail.py
+++ b/tcp_tests/tests/system/test_install_opencontrail.py
@@ -52,14 +52,14 @@
         if settings.RUN_SL_TESTS:
             show_step(5)
             stacklight_deployed.run_sl_functional_tests(
-                'ctl01',
+                'cfg01',
                 '/root/stacklight-pytest/stacklight_tests/',
                 'tests/prometheus',
                 'test_alerts.py')
             show_step(8)
             # Download report
             stacklight_deployed.download_sl_test_report(
-                'ctl01',
+                'cfg01',
                 '/root/stacklight-pytest/stacklight_tests/report.xml')
         LOG.info("*************** DONE **************")
 
@@ -121,5 +121,74 @@
             stacklight_deployed.download_sl_test_report(
                 'ctl01',
                 '/root/stacklight-pytest/stacklight_tests/report.xml')
+        LOG.info("*************** DONE **************")
 
+    @pytest.mark.extract(container_system='docker', extract_from='myimage',
+                         files_to_extract=['report'])
+    @pytest.mark.merge_xunit(path='/root/report',
+                             output='/root/conformance_result.xml')
+    @pytest.mark.grab_k8s_results(name=['k8s_conformance.log',
+                                        'conformance_result.xml'])
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    def test_install_opencontrail4_k8s(self, config, show_step,
+                                       k8s_deployed, k8s_logs):
+        """Test for deploying MCP environment with k8s and check it
+
+        Scenario:
+            1. Prepare salt on hosts
+            2. Setup controller nodes
+            3. Setup compute nodes
+            4. Setup Kubernetes cluster
+            5. Run conformance if needed
+
+        """
+
+        if config.k8s.k8s_conformance_run:
+            show_step(5)
+            k8s_deployed.run_conformance(raise_on_err=False)
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.extract(container_system='docker', extract_from='myimage',
+                         files_to_extract=['report'])
+    @pytest.mark.merge_xunit(path='/root/report',
+                             output='/root/conformance_result.xml')
+    @pytest.mark.grab_k8s_results(name=['k8s_conformance.log',
+                                        'conformance_result.xml'])
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    def test_install_opencontrail4_k8s_lma(self, config, show_step,
+                                           k8s_deployed,
+                                           stacklight_deployed,
+                                           k8s_logs):
+        """Test for deploying MCP environment with k8s and check it
+
+        Scenario:
+            1. Prepare salt on hosts
+            2. Setup controller nodes
+            3. Setup compute nodes
+            4. Setup Kubernetes cluster
+            5. Check targets
+            6. Check docker services
+            7. Run SL tests
+            8. Download SL report
+            9. Run conformance if needed
+        """
+        # Run SL component tests
+        if settings.RUN_SL_TESTS:
+            show_step(7)
+            stacklight_deployed.run_sl_functional_tests(
+                'cfg01',
+                '/root/stacklight-pytest/stacklight_tests/',
+                'tests/prometheus',
+                'test_alerts.py')
+            show_step(8)
+            # Download report
+            stacklight_deployed.download_sl_test_report(
+                'cfg01',
+                '/root/stacklight-pytest/stacklight_tests/report.xml')
+
+        if config.k8s.k8s_conformance_run:
+            show_step(9)
+            k8s_deployed.run_conformance(raise_on_err=False)
         LOG.info("*************** DONE **************")
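+
+# Typical local invocation (a sketch; the RUN_SL_TESTS toggle and py.test
+# entry point are assumptions about this suite's settings):
+#   export RUN_SL_TESTS=true
+#   py.test -k test_install_opencontrail4_k8s_lma tcp_tests/tests/system/test_install_opencontrail.py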