Deploy multirack env with L3

Related-PRODX: PRODX-13355

Change-Id: I820a71cab6ba5cd6098aae508aa4cb180f028e94
diff --git a/de/heat-templates/env/main-wrkr5-rack1-cmp1-rack2-cmp1.yaml b/de/heat-templates/env/main-wrkr5-rack1-cmp1-rack2-cmp1.yaml
new file mode 100644
index 0000000..622591f
--- /dev/null
+++ b/de/heat-templates/env/main-wrkr5-rack1-cmp1-rack2-cmp1.yaml
@@ -0,0 +1,101 @@
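+# Multirack environment with L3: 5 main workers plus rack01 and rack02
+# with one compute node each.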
+resource_registry:
+  "MCP2::CentralSite": ../fragments/multirack/CentralSite.yaml
+  "MCP2::Rack": ../fragments/multirack/Rack.yaml
+  "MCP2::RackRouterRoutes": ../fragments/multirack/RackRouterRoutes.yaml
+
+parameters:
+  image: bionic-server-cloudimg-amd64-20190612
+  public_net_id: public
+  main_worker_size: 5
+  rack01_cmp_size: 1
+  rack02_cmp_size: 1
+  cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
+  tunnel_interface: 'ens3'
+  main_worker_metadata: {"labels": {"openstack-control-plane":"enabled","openvswitch":"enabled","openstack-gateway": "enabled","role":"ceph-osd-node","local-volume-provisioner": "enabled"}}
+  rack01_cmp_metadata: {"labels": {"openstack-compute-node":"enabled","openvswitch":"enabled"}}
+  rack02_cmp_metadata: {"labels": {"openstack-compute-node":"enabled","openvswitch":"enabled"}}
+  # hardware_metadata, which is used for Ceph, requires a flavor with
+  # ephemeral storage because the ephemeral disk is used for the Ceph BlueStore.
+  main_worker_flavor: 'system.compact.openstack.control.ephemeral'
+  rack01_cmp_flavor: 'system.compact.openstack.control.ephemeral'
+  docker_ucp_image: docker-dev-kaas-local.docker.mirantis.net/lcm/docker/ucp:3.3.3
+  docker_ee_url: https://storebits.docker.com/ubuntu
+  docker_ee_release: stable-19.03
+  private_floating_interface: 'ens4'
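+  # veth-phy is one end of the veth pair created by rack_functions_override below.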
+  rack_private_floating_interface: 'veth-phy'
+  main_worker_hardware_metadata: |
+    '00:00:00:00:00:00':
+      write_files:
+        - path: /usr/share/metadata/ceph.yaml
+          content: |
+            storageDevices:
+              - name: vdb
+                role: hdd
+                sizeGb: 20
+            ramGb: 8
+            cores: 2
+            # The roles will be assigned based on node labels.
+            # roles:
+            #   - mon
+            #   - mgr
+            ips:
+              - 192.168.122.101
+            crushPath: {}
+
+  rack_functions_override: |
+
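+    # Override of network_config for rack nodes: creates the veth-phy/veth-br pair
+    # and the br-public bridge in netplan.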
+    function network_config {
+        PUBLIC_NODE_IP_ADDRESS=${PUBLIC_INTERFACE_IP:-$(ip addr show dev ${PUBLIC_INTERFACE} | grep -Po 'inet \K[\d.]+' | egrep -v "127.0.|172.17")}
+        PUBLIC_NODE_IP_NETMASK=${PUBLIC_INTERFACE_NETMASK:-$(ip addr show dev ${PUBLIC_INTERFACE} | grep -Po 'inet \K[\d.]+\/[\d]+' | egrep -v "127.0.|172.17" | cut -d'/' -f2)}
+
+        local public_interface=${1:-${PUBLIC_INTERFACE}}
+        local cloud_netplan_cfg="/etc/netplan/50-cloud-init.yaml"
+        local public_address_match_ip_line
+
+        DEBIAN_FRONTEND=noninteractive apt-get -y install bridge-utils
+
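+    # Create a persistent veth pair (veth-phy <-> veth-br) via a systemd-networkd netdev unit.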
+    cat << EOF > /etc/systemd/network/10-veth-phy-br.netdev
+    [NetDev]
+    Name=veth-phy
+    Kind=veth
+    [Peer]
+    Name=veth-br
+    EOF
+
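+        # Register both veth endpoints as (empty) ethernets in the netplan config.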
+        sed -i 's/.*ethernets:.*/&\n        veth-phy: {}/' ${cloud_netplan_cfg}
+        sed -i 's/.*ethernets:.*/&\n        veth-br: {}/' ${cloud_netplan_cfg}
+
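+        # Delete the public address entry and the line preceding it from the netplan config.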
+        public_address_match_ip_line=$(grep -nm1 "${PUBLIC_NODE_IP_ADDRESS}/${PUBLIC_NODE_IP_NETMASK}" ${cloud_netplan_cfg} | cut -d: -f1)
+        sed -i "$((${public_address_match_ip_line}-1)),$((${public_address_match_ip_line}))d" ${cloud_netplan_cfg}
+
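+    # Append a br-public bridge definition (no DHCP) to the netplan config.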
+    cat << EOF >> ${cloud_netplan_cfg}
+        bridges:
+            br-public:
+                dhcp4: false
+    EOF
+
+        # Remove Tunnel interface from netplan
+        if [[ $TUNNEL_INTERFACE_NETPLAN_MANAGE == false ]]; then
+            sed -i "/        ${TUNNEL_INTERFACE}/,/            set-name: ${TUNNEL_INTERFACE}/d" ${cloud_netplan_cfg}
+        fi
+
+        netplan --debug apply
+
+        # NOTE(vsaienko): give some time to apply changes
+        sleep 15
+
+        # When the tunnel interface is not managed by netplan, flush its addresses and bring it up manually
+        if [[ $TUNNEL_INTERFACE_NETPLAN_MANAGE == false ]]; then
+            ip addr flush dev ${TUNNEL_INTERFACE}
+            ip link set ${TUNNEL_INTERFACE} up
+        fi
+
+    }