Add WaitCondition (WC) for HCO env

- added wait handle to HCO env
- added metadata for Ceph nodes

Related-PROD: KUBV-73
Change-Id: Ib8feca2eb1243586e5b17c869eed23f3a1ab5ccd
diff --git a/hco/top.yaml b/hco/top.yaml
index 59aeaeb..e8c4929 100644
--- a/hco/top.yaml
+++ b/hco/top.yaml
@@ -36,9 +36,26 @@
     type: string
     description: The CIDR of k8s network
     default: '10.11.0.0/24'
+  storage_backend_network_cidr:
+    type: string
+    default: '10.12.0.0/24'
+  storage_frontend_network_cidr:
+    type: string
+    default: '10.12.1.0/24'
   dns_nameservers:
     type: json
-    default: []
+    default: ['172.18.224.6', '172.18.176.6']
+  hardware_metadata:
+    description: The content of lab metadata.
+    default: ''
+    type: string
+  worker_metadata:
+    type: json
+    default: {}
+  boot_timeout:
+    type: number
+    description: Boot timeout for instances
+    default: 600
 
 resources:
 
@@ -85,6 +102,26 @@
       cidr: { get_param: data_network_cidr }
       gateway_ip: ~
 
+  storage_backend_network:
+    type: OS::Neutron::Net
+  storage_backend_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network: { get_resource: storage_backend_network }
+      enable_dhcp: false
+      cidr: { get_param: storage_backend_network_cidr }
+      gateway_ip: ~
+
+  storage_frontend_network:
+    type: OS::Neutron::Net
+  storage_frontend_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network: { get_resource: storage_frontend_network }
+      enable_dhcp: false
+      cidr: { get_param: storage_frontend_network_cidr }
+      gateway_ip: ~
+
   masters:
     type: OS::Heat::ResourceGroup
     depends_on:
@@ -96,15 +133,16 @@
       resource_def:
         type: VMInstances
         properties:
-          node_type: "controller"
           k8s_network: { get_resource: k8s_network }
           k8s_subnet_id: { get_resource: k8s_subnet }
           public_net_id: { get_param: public_net_id }
+          storage_frontend_network: { get_resource: storage_frontend_network }
           data_network: { get_resource: data_network }
           availability_zone: { get_param: availability_zone }
           image: { get_param: image }
           flavor: { get_param: masters_flavor }
           key_name: { get_attr: [keypair_name, value] }
+          boot_timeout: { get_param: boot_timeout }
 
   workers:
     type: OS::Heat::ResourceGroup
@@ -115,17 +153,21 @@
     properties:
       count: { get_param: workers_size }
       resource_def:
-        type: VMInstances
+        type: VMInstancesCeph
         properties:
-          node_type: "worker"
           k8s_network: { get_resource: k8s_network }
           k8s_subnet_id: { get_resource: k8s_subnet }
           public_net_id: { get_param: public_net_id }
+          storage_frontend_network: { get_resource: storage_frontend_network }
+          storage_backend_network: { get_resource: storage_backend_network }
           data_network: { get_resource: data_network }
           availability_zone: { get_param: availability_zone }
           image: { get_param: image }
           flavor: { get_param: workers_flavor }
           key_name: { get_attr: [keypair_name, value] }
+          metadata: { get_param: worker_metadata }
+          hardware_metadata: { get_param: hardware_metadata }
+          boot_timeout: { get_param: boot_timeout }
 
 outputs:
   masters_ips:
@@ -134,3 +176,12 @@
   workers_ips:
     description: Public IP addresses of the deployed worker instances
     value: { get_attr: [workers, server_public_ip] }
+  storage_frontend_network_cidr:
+    description: Storage network which is used as clientNet in Ceph CR
+    value: { get_param: storage_frontend_network_cidr }
+  storage_backend_network_cidr:
+    description: Storage network which is used as clusterNet in Ceph CR
+    value: { get_param: storage_backend_network_cidr }
+  workers_wc_data:
+    description: Metadata from workers
+    value: { get_attr: [workers, wc_data] }