Merge "Add mstr1-wrkr5-cmp5-gtw0"
diff --git a/de/heat-templates/env/main-wrkr5-rack1-cmp1-rack2-cmp1.yaml b/de/heat-templates/env/main-wrkr5-rack1-cmp1-rack2-cmp1.yaml
index 622591f..9b0177f 100644
--- a/de/heat-templates/env/main-wrkr5-rack1-cmp1-rack2-cmp1.yaml
+++ b/de/heat-templates/env/main-wrkr5-rack1-cmp1-rack2-cmp1.yaml
@@ -22,7 +22,13 @@
   docker_ee_url: https://storebits.docker.com/ubuntu
   docker_ee_release: stable-19.03
   private_floating_interface: 'ens4'
+  default_interface: 'ens3'
   rack_private_floating_interface: 'veth-phy'
+  lmas_size: 0
+  lmas_metadata: {"labels": {"role": "stacklight", "stacklight": "enabled", "local-volume-provisioner": "enabled"}}
+  lmas_flavor: system.compact.stacklight.server
+  # qos_max_burst_kbps_egress: 1536
+  # qos_max_kbps_egress: 1433
   main_worker_hardware_metadata: |
     '00:00:00:00:00:00':
       write_files:
diff --git a/de/heat-templates/env/main-wrkr5-rack1-cmp2-rack2-cmp2.yaml b/de/heat-templates/env/main-wrkr5-rack1-cmp2-rack2-cmp2.yaml
index 07ce799..8654005 100644
--- a/de/heat-templates/env/main-wrkr5-rack1-cmp2-rack2-cmp2.yaml
+++ b/de/heat-templates/env/main-wrkr5-rack1-cmp2-rack2-cmp2.yaml
@@ -22,7 +22,11 @@
   docker_ee_url: https://storebits.docker.com/ubuntu
   docker_ee_release: stable-19.03
   private_floating_interface: 'ens4'
+  default_interface: 'ens3'
   rack_private_floating_interface: 'veth-phy'
+  lmas_size: 0
+  lmas_metadata: {"labels": {"role": "stacklight", "stacklight": "enabled", "local-volume-provisioner": "enabled"}}
+  lmas_flavor: system.compact.stacklight.server
   main_worker_hardware_metadata: |
     '00:00:00:00:00:00':
       write_files:
diff --git a/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-lma3.yaml b/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-lma3.yaml
index 4668a94..b73de93 100644
--- a/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-lma3.yaml
+++ b/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-lma3.yaml
@@ -26,7 +26,7 @@
   worker_metadata: {"labels": {"openstack-control-plane":"enabled","openvswitch":"enabled","openstack-gateway":"enabled","local-volume-provisioner": "enabled"}}
   cmp_metadata: {"labels": {"openstack-compute-node":"enabled","openvswitch":"enabled", "role":"ceph-osd-node"}}
   gtw_metadata: {"labels": {"openvswitch":"enabled"}}
-  lma_metadata: {"labels": {"role": "stacklight", "stacklight": "enabled"}}
+  lma_metadata: {"labels": {"role": "stacklight", "stacklight": "enabled", "local-volume-provisioner": "enabled"}}
   # hardware_metadata which is used for Ceph requires flavor with
   # ephemeral storage because it is used for Ceph bluestore.
   workers_flavor: 'system.compact.openstack.control.ephemeral'
diff --git a/de/heat-templates/fragments/NetworkAccVM.yaml b/de/heat-templates/fragments/NetworkAccVM.yaml
index 7ec1a14..d0eddc5 100644
--- a/de/heat-templates/fragments/NetworkAccVM.yaml
+++ b/de/heat-templates/fragments/NetworkAccVM.yaml
@@ -8,6 +8,9 @@
   dns_nameservers:
     type: json
     default: []
+  control_network_host_routes:
+    type: json
+    default: []
 
 resources:
 
@@ -20,6 +23,7 @@
       enable_dhcp: true
       cidr: { get_param: control_network_cidr }
       dns_nameservers: { get_param: dns_nameservers }
+      host_routes: { get_param: control_network_host_routes }
   router:
     type: OS::Neutron::Router
     properties:
diff --git a/de/heat-templates/fragments/NetworkPrvFl.yaml b/de/heat-templates/fragments/NetworkPrvFl.yaml
index 077cfa8..d673ac6 100644
--- a/de/heat-templates/fragments/NetworkPrvFl.yaml
+++ b/de/heat-templates/fragments/NetworkPrvFl.yaml
@@ -22,7 +22,7 @@
     type: string
   vsrx_image:
     type: string
-    default: vsrx-mcp2-tf-3-nets
+    default: vsrx-mcp2-tf-3-nets-lo
   vsrx_flavor:
     type: string
     default: oc_vsrx
diff --git a/de/heat-templates/fragments/NetworkPrvFlVSRX.yaml b/de/heat-templates/fragments/NetworkPrvFlVSRX.yaml
index 4f60e07..7cf2e1a 100644
--- a/de/heat-templates/fragments/NetworkPrvFlVSRX.yaml
+++ b/de/heat-templates/fragments/NetworkPrvFlVSRX.yaml
@@ -19,7 +19,7 @@
     type: string
   vsrx_image:
     type: string
-    default: vsrx-mcp2-tf-3-nets
+    default: vsrx-mcp2-tf-3-nets-lo
   vsrx_flavor:
     type: string
     default: oc_vsrx
diff --git a/de/heat-templates/fragments/NetworkTun.yaml b/de/heat-templates/fragments/NetworkTun.yaml
index 0b122ac..3595cc7 100644
--- a/de/heat-templates/fragments/NetworkTun.yaml
+++ b/de/heat-templates/fragments/NetworkTun.yaml
@@ -7,6 +7,9 @@
     type: string
   tun_network_pool_end:
     type: string
+  tun_network_host_routes:
+    type: json
+    default: []
 
 resources:
 
@@ -22,6 +25,7 @@
       allocation_pools:
         - start: { get_param: tun_network_pool_start }
           end: { get_param: tun_network_pool_end }
+      host_routes: { get_param: tun_network_host_routes }
 
 outputs:
   tun_network_id:
diff --git a/de/heat-templates/fragments/multirack/CentralSite.yaml b/de/heat-templates/fragments/multirack/CentralSite.yaml
index 53b72b3..d421a1f 100644
--- a/de/heat-templates/fragments/multirack/CentralSite.yaml
+++ b/de/heat-templates/fragments/multirack/CentralSite.yaml
@@ -68,6 +68,18 @@
   dns_nameservers:
     type: json
     default: []
+  lmas_size:
+    type: number
+  lmas_metadata:
+    type: json
+  lmas_flavor:
+    type: string
+  lmas_hardware_metadata:
+    description: The content of lab metadata.
+    default: ''
+    type: string
+  default_interface:
+    type: string
 
 resources:
   router:
@@ -206,6 +218,45 @@
           storage_frontend_subnet_id: { get_resource: storage_frontend_subnet }
           storage_frontend_interface: { get_param: storage_frontend_interface }
           storage_frontend_network_cidr: { get_param: storage_frontend_network_cidr }
+          default_interface: { get_param: default_interface }
+
+  lmas:
+    type: OS::Heat::ResourceGroup
+    depends_on:
+     - ucp
+    properties:
+      count: { get_param: lmas_size }
+      resource_def:
+        type: ./SrvInstancesVMCeph.yaml
+        properties:
+          metadata: { get_param: lmas_metadata}
+          ucp_master_host: { get_attr: [ucp, server_control_ip] }
+          docker_ee_url: { get_param: docker_ee_url }
+          docker_ee_release: { get_param: docker_ee_release }
+          docker_ucp_image: { get_param: docker_ucp_image}
+          docker_default_address_pool: { get_param: docker_default_address_pool }
+          node_type: "worker"
+          key_name: { get_param: key_name }
+          image: { get_param: image }
+          flavor: { get_param: lmas_flavor }
+          control_network: { get_resource: control_network }
+          control_subnet_id: { get_resource: control_subnet }
+          control_network_cidr: { get_param: control_network_cidr }
+          private_floating_network: { get_resource: private_floating_network }
+          private_floating_subnet_id: { get_resource: private_floating_subnet }
+          private_floating_interface: { get_param: private_floating_interface }
+          private_floating_network_cidr: { get_param: private_floating_network_cidr }
+          public_net_id: { get_param: public_net_id }
+          hardware_metadata: { get_param: lmas_hardware_metadata }
+          boot_timeout: { get_param: boot_timeout }
+          storage_backend_network: { get_resource: storage_backend_network }
+          storage_backend_subnet_id: { get_resource: storage_backend_subnet }
+          storage_backend_interface: { get_param: storage_backend_interface }
+          storage_backend_network_cidr: { get_param: storage_backend_network_cidr }
+          storage_frontend_network: { get_resource: storage_frontend_network }
+          storage_frontend_subnet_id: { get_resource: storage_frontend_subnet }
+          storage_frontend_interface: { get_param: storage_frontend_interface }
+          storage_frontend_network_cidr: { get_param: storage_frontend_network_cidr }
 
 outputs:
   worker_public_ip:
diff --git a/de/heat-templates/fragments/multirack/Rack.yaml b/de/heat-templates/fragments/multirack/Rack.yaml
index 0801a63..23036b3 100644
--- a/de/heat-templates/fragments/multirack/Rack.yaml
+++ b/de/heat-templates/fragments/multirack/Rack.yaml
@@ -42,11 +42,15 @@
     type: string
   functions_override:
     type: string
+  qos_policy_name:
+    type: string
 
 resources:
 
   control_network:
     type: OS::Neutron::Net
+    properties:
+      qos_policy: { get_param: qos_policy_name }
   control_subnet:
     type: OS::Neutron::Subnet
     properties:
diff --git a/de/heat-templates/fragments/multirack/SrvInstancesVMCeph.yaml b/de/heat-templates/fragments/multirack/SrvInstancesVMCeph.yaml
index c0d94b2..6a0997b 100644
--- a/de/heat-templates/fragments/multirack/SrvInstancesVMCeph.yaml
+++ b/de/heat-templates/fragments/multirack/SrvInstancesVMCeph.yaml
@@ -71,6 +71,9 @@
   storage_frontend_network_cidr:
     description: The CIDR for control network
     type: string
+  default_interface:
+    type: string
+    default: ''
   user_data_config:
     description: This is part of clout-config which denies to mount drive with label ephemeral0 to /mnt
     type: string
@@ -111,6 +114,7 @@
             $storage_backend_interface: { get_param: storage_backend_interface }
             $storage_backend_network_interface_ip: { get_attr: [storage_backend_server_port, fixed_ips, 0, ip_address] }
             $storage_backend_network_cidr: { get_param: storage_backend_network_cidr }
+            $default_interface: { get_param: default_interface }
 
   inject_files:
     type: "OS::Heat::CloudConfig"
diff --git a/de/heat-templates/multirack.yaml b/de/heat-templates/multirack.yaml
index 8e37448..abcae03 100644
--- a/de/heat-templates/multirack.yaml
+++ b/de/heat-templates/multirack.yaml
@@ -163,6 +163,28 @@
   rack_functions_override:
     type: string
     default: ''
+  lmas_size:
+    type: number
+  lmas_metadata:
+    type: json
+  lmas_flavor:
+    type: string
+  default_interface:
+    type: string
+    default: ''
+  qos_max_burst_kbps_ingress:
+    type: number
+    default: 0
+  qos_max_kbps_ingress:
+    type: number
+    default: 0
+  qos_max_burst_kbps_egress:
+    type: number
+    default: 0
+  qos_max_kbps_egress:
+    type: number
+    default: 0
+
 
 resources:
   keypair_name:
@@ -178,6 +200,42 @@
       public_key: { get_param: cluster_public_key }
       save_private_key: false
 
+  qos_policy_gen_name:
+    type: OS::Heat::RandomString
+    properties:
+      character_classes: [{"class": "hexdigits", "min": 1}]
+      length: 8
+      salt: constant
+
+  rack_qos_policy:
+    type: OS::Neutron::QoSPolicy
+    properties:
+      description: QoS policy applied to rack control networks
+      name:
+        list_join:
+        - '-'
+        - [ { get_param: "OS::stack_name" }, { get_attr: [qos_policy_gen_name, value] } ]
+      shared: True
+
+  rack_bandwith_rule_egress:
+    type: OS::Neutron::QoSBandwidthLimitRule
+    properties:
+      max_burst_kbps: { get_param: qos_max_burst_kbps_egress }
+      max_kbps: { get_param: qos_max_kbps_egress }
+      policy: { get_resource: rack_qos_policy }
+# NOTE (ohryhorov): section below with "direction" should be uncommented once cloud is
+# upgraded to OpenStack Train version.
+#
+#      direction: 'egress'
+#
+#  rack_bandwith_rule_ingress:
+#    type: OS::Neutron::QoSBandwidthLimitRule
+#    properties:
+#      max_burst_kbps: { get_param: qos_max_burst_kbps_ingress }
+#      max_kbps: { get_param: qos_max_kbps_ingress }
+#      policy: { get_resource: rack_qos_policy }
+#      direction: 'ingress'
+
   central_site:
     type: MCP2::CentralSite
     properties:
@@ -206,6 +264,11 @@
       worker_hardware_metadata: { get_param: main_worker_hardware_metadata }
       dns_nameservers: { get_param: dns_nameservers }
       boot_timeout: { get_param: central_boot_timeout }
+      lmas_size: { get_param: lmas_size }
+      lmas_metadata: { get_param: lmas_metadata }
+      lmas_flavor: { get_param: lmas_flavor }
+      lmas_hardware_metadata: { get_param: main_worker_hardware_metadata }
+      default_interface: { get_param: default_interface }
 
   rack01_router_routes:
     type: MCP2::RackRouterRoutes
@@ -237,6 +300,7 @@
       boot_timeout: { get_param: rack_boot_timeout }
       private_floating_interface: { get_param: rack_private_floating_interface }
       functions_override: { get_param: rack_functions_override }
+      qos_policy_name: { get_resource: rack_qos_policy }
 
   rack02_router_routes:
     depends_on:
@@ -270,6 +334,7 @@
       boot_timeout: { get_param: rack_boot_timeout }
       private_floating_interface: { get_param: rack_private_floating_interface }
       functions_override: { get_param: rack_functions_override }
+      qos_policy_name: { get_resource: rack_qos_policy }
 
 outputs:
   central_site_worker_public_ip:
diff --git a/de/heat-templates/scripts/instance_boot.sh b/de/heat-templates/scripts/instance_boot.sh
index 1464109..becccfe 100644
--- a/de/heat-templates/scripts/instance_boot.sh
+++ b/de/heat-templates/scripts/instance_boot.sh
@@ -1,5 +1,6 @@
 #!/bin/bash
 set -x
+set -e
 # allow access to the local variables from prepare-metadata.py
 set -a
 
@@ -16,6 +17,8 @@
 PUBLIC_INTERFACE_CIDR=${PUBLIC_INTERFACE_CIDR:-$private_floating_network_cidr}
 PUBLIC_INTERFACE_NETMASK=$(echo ${PUBLIC_INTERFACE_CIDR} | cut -d'/' -f2)
 
+DEFAULT_INTERFACE=${DEFAULT_INTERFACE:-$default_interface}
+
 STORAGE_BACKEND_INTERFACE=${STORAGE_BACKEND_INTERFACE:-$storage_backend_interface}
 STORAGE_BACKEND_INTERFACE_IP=${STORAGE_BACKEND_INTERFACE_IP:-$storage_backend_network_interface_ip}
 STORAGE_BACKEND_NETWORK=${STORAGE_BACKEND_NETWORK:-$storage_backend_network_cidr}
@@ -87,7 +90,7 @@
 }
 ### END COMMON FUNCTIONS ###
 
-DEFAULT_INTERFACE=$(ip route show |awk '/default/ {print $5}')
+DEFAULT_INTERFACE=${DEFAULT_INTERFACE:-$(ip route show |awk '/default/ {print $5}')}
 
 if [[ -n ${CONTROL_NETWORK_CIDR} ]]; then
     CONTROL_IP_ADDRESS=$(ip route get ${CONTROL_NETWORK_CIDR%/*} | head -n1 | fgrep -v ' via ' | awk '/ src / {print $6}')
@@ -218,11 +221,13 @@
         local device_num=$1
         local device_size=$2
         local vg_name=$3
-
-        #configure lvm only on compute nodes
-        if [[ ${NODE_METADATA} == *"openstack-compute-node"* ]]; then
-            truncate --size ${device_size}G /srv/disk${device_num}
-            cat <<EOF > /etc/systemd/system/setup-loopback-loop${device_num}.service
+        if losetup -l | grep /dev/loop${device_num}; then
+        echo "Volume /dev/loop${device_num} is already initialized"
+        else
+            #configure lvm only on compute nodes
+            if [[ ${NODE_METADATA} == *"openstack-compute-node"* ]]; then
+                truncate --size ${device_size}G /srv/disk${device_num}
+                cat <<EOF > /etc/systemd/system/setup-loopback-loop${device_num}.service
 [Unit]
 Description=Setup loop${device_num} device
 DefaultDependencies=no
@@ -238,12 +243,13 @@
 [Install]
 WantedBy=local-fs.target
 EOF
-            systemctl enable setup-loopback-loop${device_num}
-            systemctl start setup-loopback-loop${device_num}
-            #adding short sleep to give time for service to start
-            sleep 3
-            pvcreate /dev/loop${device_num}
-            vgcreate ${vg_name} /dev/loop${device_num}
+                systemctl enable setup-loopback-loop${device_num}
+                systemctl start setup-loopback-loop${device_num}
+                #adding short sleep to give time for service to start
+                sleep 3
+                pvcreate /dev/loop${device_num}
+                vgcreate ${vg_name} /dev/loop${device_num}
+            fi
         fi
     }
 
@@ -307,19 +313,22 @@
 }
 
 function install_ucp {
-    local tmpd
-    tmpd=$(mktemp -d)
-    function docker_run_retry {
-        docker container run --rm --name ucp \
-        -v /var/run/docker.sock:/var/run/docker.sock \
-        ${DOCKER_UCP_IMAGE} install \
-        --host-address $UCP_IP_ADDRESS \
-        --admin-username $UCP_USERNAME \
-        --admin-password $UCP_PASSWORD \
-        --existing-config
-    }
-
-    retry 10 "Can't bring up docker UCP container" docker_run_retry
+    if docker ps --all | grep ucp-controller; then
+        echo "Docker UCP container is running"
+    else
+        local tmpd
+        tmpd=$(mktemp -d)
+        function docker_run_retry {
+            docker container run --rm --name ucp \
+            -v /var/run/docker.sock:/var/run/docker.sock \
+            ${DOCKER_UCP_IMAGE} install \
+            --host-address $UCP_IP_ADDRESS \
+            --admin-username $UCP_USERNAME \
+            --admin-password $UCP_PASSWORD \
+            --existing-config
+        }
+        retry 10 "Can't bring up docker UCP container" docker_run_retry
+    fi
 }
 
 function get_authtoken_retry {
@@ -363,18 +372,25 @@
 }
 
 function join_node {
-    local type=${1}
-    function retry_join_node {
-        env -i $(docker swarm join-token $type |grep 'docker swarm join' | xargs)
-    }
-    retry 10 "Failed to join node to swarm" retry_join_node
+    if kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes |grep -w Ready |awk '{print $1}' |grep -q $(hostname); then
+        echo "This node has already joined the cluster"
+    else
+        local type=${1}
+        function retry_join_node {
+            env -i $(docker swarm join-token $type |grep 'docker swarm join' | xargs)
+        }
+        retry 10 "Failed to join node to swarm" retry_join_node
+    fi
 }
 
 function create_ucp_config {
     if [[ "${SINGLE_NODE}" == true ]]; then
         max_pods="kubelet_max_pods = 220"
     fi
-    echo "
+    if docker config ls | grep com.docker.ucp.config ; then
+        echo "Config com.docker.ucp.config already exists"
+    else
+        echo "
 [scheduling_configuration]
     enable_admin_ucp_scheduling = true
     default_node_orchestrator = \"kubernetes\"
@@ -382,10 +398,15 @@
     dns = [\"172.18.208.44\"]
     ${max_pods}
 " | docker config create com.docker.ucp.config -
+    fi
 }
 
 function swarm_init {
-    docker swarm init --advertise-addr ${UCP_IP_ADDRESS} --data-path-addr ${UCP_IP_ADDRESS} --listen-addr ${UCP_IP_ADDRESS} --data-path-port ${UCP_DOCKER_SWARM_DATA_PORT}
+    if docker node ls | grep $HOSTNAME; then
+        echo "This node is already part of a swarm"
+    else
+        docker swarm init --advertise-addr ${UCP_IP_ADDRESS} --data-path-addr ${UCP_IP_ADDRESS} --listen-addr ${UCP_IP_ADDRESS} --data-path-port ${UCP_DOCKER_SWARM_DATA_PORT}
+    fi
 }
 
 function rm_ucp_config {
@@ -452,16 +473,19 @@
 }
 
 function workaround_default_forward_policy {
-    cat << EOF > /etc/iptables/rules.v4
+    if grep "DOCKER-USER" /etc/iptables/rules.v4; then
+        echo "Iptables rules are already configured"
+    else
+        cat << EOF > /etc/iptables/rules.v4
 *filter
 :DOCKER-USER - [0:0]
 EOF
-    for net in $FLOATING_NETWORK_PREFIXES; do
+        for net in $FLOATING_NETWORK_PREFIXES; do
 cat << EOF >> /etc/iptables/rules.v4
 -A DOCKER-USER -d ${net} -j ACCEPT
 -A DOCKER-USER -s ${net} -j ACCEPT
 EOF
-    done
+        done
 
 cat << EOF >> /etc/iptables/rules.v4
 -A DOCKER-USER -j RETURN
@@ -473,16 +497,17 @@
 *nat
 :POSTROUTING ACCEPT - [0:0]
 EOF
-    for net in $FLOATING_NETWORK_PREFIXES; do
+        for net in $FLOATING_NETWORK_PREFIXES; do
 cat << EOF >> /etc/iptables/rules.v4
 -A POSTROUTING -s ${net} -o ${DEFAULT_INTERFACE} -j MASQUERADE
 EOF
-    done
+        done
 
 cat << EOF >> /etc/iptables/rules.v4
 COMMIT
 EOF
-    sudo netfilter-persistent reload
+        sudo netfilter-persistent reload
+    fi
 }
 
 function disable_rp_filter {
@@ -590,7 +615,9 @@
 fi
 
     public_address_match_ip_line=$(grep -nm1 "${PUBLIC_NODE_IP_ADDRESS}/${PUBLIC_NODE_IP_NETMASK}" ${cloud_netplan_cfg} | cut -d: -f1)
-    sed -i "$((${public_address_match_ip_line}-1)),$((${public_address_match_ip_line}))d" ${cloud_netplan_cfg}
+    if [ -n "${public_address_match_ip_line}" ] ; then
+        sed -i "$((${public_address_match_ip_line}-1)),$((${public_address_match_ip_line}))d" ${cloud_netplan_cfg}
+    fi
 
 cat << EOF >> ${cloud_netplan_cfg}
     bridges:
@@ -693,18 +720,21 @@
 function collect_interfaces_metadata {
 
     local if_metadata_file="/usr/share/metadata/interfaces.yaml"
+    if [ -z "${TUNNEL_INTERFACE}" ] ; then
+        echo "Tunnel interface is empty"
+    else
+        pci_id=$(ethtool -i ${TUNNEL_INTERFACE} |grep bus-info | awk '{print $2}')
+        mac=$(cat $(find /sys/bus/pci/devices/${pci_id}/ -name net)/${TUNNEL_INTERFACE}/address)
+        ip=${TUNNEL_INTERFACE_IP}/${TUNNEL_INTERFACE_NETWORK_NETMASK}
 
-    pci_id=$(ethtool -i ${TUNNEL_INTERFACE} |grep bus-info | awk '{print $2}')
-    mac=$(cat $(find /sys/bus/pci/devices/${pci_id}/ -name net)/${TUNNEL_INTERFACE}/address)
-    ip=${TUNNEL_INTERFACE_IP}/${TUNNEL_INTERFACE_NETWORK_NETMASK}
-
-    cat << EOF > ${if_metadata_file}
+        cat << EOF > ${if_metadata_file}
 ${TUNNEL_INTERFACE}:
   pci_id: "${pci_id}"
   ip_addr: "${ip}"
   mac: "${mac}"
 EOF
 
+    fi
     HW_METADATA=$(echo ${HW_METADATA} | jq -cr ". += {\"interfaces\": {\"$(hostname)\": \"$(base64 -w 0 ${if_metadata_file})\"}}")
 
 }
@@ -881,6 +911,14 @@
     setup_evpn
 }
 
+# Report failure to the Heat wait condition if the script exits with a non-zero status
+function handle_exit {
+    if [ $? != 0 ] ; then
+        wait_condition_send "FAILURE" "Script failed."
+    fi
+}
+trap handle_exit EXIT
+
 if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
     case "$NODE_TYPE" in
         # Please keep the "prepare_metadata_files", "disable-rp-filter", "network_config" and "prepare_network" functions
diff --git a/de/heat-templates/top.yaml b/de/heat-templates/top.yaml
index 99e8d5d..d3949b6 100644
--- a/de/heat-templates/top.yaml
+++ b/de/heat-templates/top.yaml
@@ -17,6 +17,13 @@
     type: string
     description: The CIDR of control network, used to detect control interface.
     default: '10.10.0.0/24'
+  control_network_host_routes:
+    type: json
+    description: >
+      List of dicts with host routes in control subnet, e.g:
+      - "destination": "10.100.100.1/32"
+        "nexthop": "172.16.1.1"
+    default: []
   control_network_vsrx_peering_ip:
     type: string
     description: IP address of vsrx for tungsten fabric peering
@@ -177,6 +184,13 @@
   tun_network_pool_end:
     type: string
     default: '10.15.0.99'
+  tun_network_host_routes:
+    type: json
+    description: >
+      List of dicts with host routes in tunnel subnet, e.g:
+      - "destination": "10.100.100.1/32"
+        "nexthop": "172.16.1.1"
+    default: []
   ucp_metadata:
     type: json
     default: {"role":"ucp"}
@@ -289,7 +303,7 @@
     default: 'false'
   vsrx_image:
     type: string
-    default: vsrx-mcp2-tf-3-nets
+    default: vsrx-mcp2-tf-3-nets-lo
   vsrx_flavor:
     type: string
     default: oc_vsrx
@@ -339,6 +353,7 @@
       public_net_id: { get_param: public_net_id }
       control_network_cidr: { get_param: control_network_cidr }
       dns_nameservers: { get_param: dns_nameservers }
+      control_network_host_routes: { get_param: control_network_host_routes }
 
   tun_network:
     type: MCP2::NetworkTun
@@ -346,6 +361,7 @@
       tun_network_cidr: { get_param: tun_network_cidr }
       tun_network_pool_start: { get_param: tun_network_pool_start }
       tun_network_pool_end: { get_param: tun_network_pool_end }
+      tun_network_host_routes: { get_param: tun_network_host_routes }
 
   private_floating_network:
     type: MCP2::NetworkPrvFl
@@ -803,7 +819,7 @@
     properties:
       count: { get_param: ntw_size }
       resource_def:
-        type: MCP2::SrvInstancesCeph
+        type: MCP2::SrvInstancesCephOSD
         properties:
           metadata: { get_param: ntw_metadata }
           node_type: "worker"
@@ -828,11 +844,18 @@
           storage_frontend_network: { get_attr: [storage_network, storage_frontend_network_id] }
           storage_frontend_subnet_id: { get_attr: [storage_network, storage_frontend_subnet_id] }
           storage_frontend_network_cidr: { get_param: storage_frontend_network_cidr }
+          storage_backend_interface: { get_param: storage_backend_interface }
+          storage_backend_network: { get_attr: [ storage_network, storage_backend_network_id ] }
+          storage_backend_subnet_id: { get_attr: [ storage_network, storage_backend_subnet_id ] }
+          storage_backend_network_cidr: { get_param: storage_backend_network_cidr }
           ironic_baremetal_network: { get_attr: [ironic_baremetal_network, ironic_baremetal_network_id] }
           ironic_baremetal_subnet_id: { get_attr: [ironic_baremetal_network, ironic_baremetal_subnet_id] }
           ironic_baremetal_network_cidr: { get_param: ironic_baremetal_network_cidr }
           ironic_baremetal_tunnel_cidr: { get_param: ironic_baremetal_tunnel_cidr }
           ironic_mt_enabled: { get_param: ironic_mt_enabled }
+          tungstenfabric_enabled: { get_param: tungstenfabric_enabled }
+          tun_network: { get_attr: [tun_network, tun_network_id] }
+          tun_subnet_id: { get_attr: [tun_network, tun_subnet_id] }
           hardware_metadata: { get_param: hardware_metadata}
 
   vbmcs:
diff --git a/trymos/image_build/files/usr/share/trymos/launch.sh b/trymos/image_build/files/usr/share/trymos/launch.sh
index 28de67f..04170b7 100755
--- a/trymos/image_build/files/usr/share/trymos/launch.sh
+++ b/trymos/image_build/files/usr/share/trymos/launch.sh
@@ -121,7 +121,9 @@
     sed -i "s/${DEFAULT_INTERFACE}:/&\n            critical: true/" ${cloud_netplan_cfg}
 
     public_address_match_ip_line=$(grep -nm1 "${PUBLIC_NODE_IP_ADDRESS}/${PUBLIC_NODE_IP_NETMASK}" ${cloud_netplan_cfg} | cut -d: -f1)
-    sed -i "$((${public_address_match_ip_line}-1)),$((${public_address_match_ip_line}))d" ${cloud_netplan_cfg}
+    if [ -n "${public_address_match_ip_line}" ] ; then
+        sed -i "$((${public_address_match_ip_line}-1)),$((${public_address_match_ip_line}))d" ${cloud_netplan_cfg}
+    fi
 
 cat << EOF >> ${cloud_netplan_cfg}
     bridges:
@@ -315,7 +317,7 @@
     done
     wait_for_pods openstack
 
-    kubectl -n openstack create job --from=cronjob/nova-cell-setup  nova-cell-setup-pd01-$(cat /dev/urandom | tr -dc '[a-z]' | head -c3)
+    kubectl -n openstack create job --from=cronjob/nova-cell-setup  nova-cell-setup-pd01-$(cat /dev/urandom | tr -dc a-z | head -c3)
     info "Openstack was deployed successfully..."
 }