Add cluster context mstr1-wrkr3-cmp2-gtw0-vbmc5

Related-Prod: PRODX-3456
Change-Id: Ib45e35d3b150ac1a8ea30f50e49cfc79b46f98c7
diff --git a/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-vbmc5.yaml b/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-vbmc5.yaml
new file mode 100644
index 0000000..735d29a
--- /dev/null
+++ b/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-vbmc5.yaml
@@ -0,0 +1,63 @@
+resource_registry:
+  "MCP2::NetworkAcc": ../fragments/NetworkAccVM.yaml
+  "MCP2::NetworkAccStorage": ../fragments/NetworkAccVMStorage.yaml
+  "MCP2::NetworkPrvFl": ../fragments/NetworkPrvFl.yaml
+  "MCP2::NetworkIronicFlat": ../fragments/NetworkIronicFlat.yaml
+  "MCP2::SrvInstances": ../fragments/SrvInstancesVM.yaml
+  "MCP2::SrvInstancesCeph": ../fragments/SrvInstancesVMCeph.yaml
+  "MCP2::SrvInstancesCephOSD": ../fragments/SrvInstancesVMCephOSD.yaml
+  "MCP2::NetworkTun": ../fragments/NetworkTun.yaml
+
+parameters:
+  image: bionic-server-cloudimg-amd64-20190612
+  public_net_id: public
+  masters_size: 0
+  worker_size: 3
+  cmp_size: 2
+  gtw_size: 0
+  lma_size: 0
+  osd_size: 0
+  vbmc_size: 5
+  ucp_boot_timeout: 3600
+  cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
+  private_floating_network_cidr: '10.11.12.0/24'
+  private_floating_interface: 'ens4'
+  tunnel_interface: 'ens8'
+  worker_metadata: {"labels": {"openstack-control-plane":"enabled","openvswitch":"enabled","openstack-gateway": "enabled","local-volume-provisioner": "enabled"}}
+  cmp_metadata: {"labels": {"openstack-compute-node":"enabled","openvswitch":"enabled", "role":"ceph-osd-node"}}
+  gtw_metadata: {"labels": {"openvswitch":"enabled"}}
+  vbmc_metadata: {"labels": {"virtualbmc":"enabled"}}
+  ironic_baremetal_network_cidr: '10.13.0.0/24'
+  ironic_baremetal_network_ipam_pool_start: '10.13.0.2'
+  ironic_baremetal_network_ipam_pool_end: '10.13.0.98'
+  ironic_baremetal_network_pool_start: '10.13.0.100'
+  ironic_baremetal_network_pool_end: '10.13.0.200'
+  # By default on CI the ironic network is set up as internal
+  # with the default gateway set to 10.13.0.99, so that
+  # Tempest can reach the baremetal VMs via floating IPs.
+  ironic_baremetal_network_gateway: '10.13.0.99'
+  # hardware_metadata, which is used for Ceph, requires a flavor with
+  # ephemeral storage because the ephemeral disk is used for Ceph BlueStore.
+  workers_flavor: 'mosk.l.control.ephemeral'
+  cmps_flavor: 'mosk.s.compute.ephemeral'
+  vbmcs_flavor: 'system.compact.openstack.control'
+  storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_backend_network_cidr: '10.12.0.0/24'
+  hardware_metadata: |
+    '00:00:00:00:00:00':
+      write_files:
+        - path: /usr/share/metadata/ceph.yaml
+          content: |
+            storageDevices:
+              - name: vdb
+                role: hdd
+                sizeGb: 20
+            ramGb: 8
+            cores: 2
+            # The roles will be assigned based on node labels.
+            # roles:
+            #   - mon
+            #   - mgr
+            ips:
+              - 192.168.122.101
+            crushPath: {}
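
A minimal usage sketch: an environment file like this is normally passed to
Heat together with the top-level template when the virtual cluster stack is
created. The top-level template path and the stack name below are assumptions
for illustration only, not part of this change:

    # template path and stack name are assumed for illustration
    openstack stack create \
      -t de/heat-templates/top.yaml \
      -e de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-vbmc5.yaml \
      mstr1-wrkr3-cmp2-gtw0-vbmc5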