Add wait condition for HCO env

- added a wait condition and handle to the HCO VM instance templates
- added hardware metadata and a new VMInstanceCeph fragment for Ceph OSD nodes (see the usage sketch below)
- added storage frontend and backend networks in top.yaml
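
Example usage (a sketch only; the stack name "hco-ceph-demo" and the
boot_timeout value are placeholders, not part of this change):

    openstack stack create -t hco/top.yaml \
        -e hco/env/ctrl1-wrkr3.yaml \
        --parameter boot_timeout=1200 \
        hco-ceph-demo

    # Once the stack is CREATE_COMPLETE, the Ceph metadata reported by the
    # workers through the wait condition is available as a stack output:
    openstack stack output show hco-ceph-demo workers_wc_data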

Related-PROD: KUBV-73
Change-Id: Ib8feca2eb1243586e5b17c869eed23f3a1ab5ccd
diff --git a/hco/env/ctrl1-wrkr3.yaml b/hco/env/ctrl1-wrkr3.yaml
index b11a8cc..ef0145b 100644
--- a/hco/env/ctrl1-wrkr3.yaml
+++ b/hco/env/ctrl1-wrkr3.yaml
@@ -1,5 +1,6 @@
 resource_registry:
   "VMInstances": ../fragments/VMInstance.yaml
+  "VMInstancesCeph": ../fragments/VMInstanceCeph.yaml
 
 parameters:
   controllers_size: 1
@@ -7,3 +8,23 @@
   image: jammy-server-cloudimg-amd64-20240417
   public_net_id: c3799996-dc8e-4477-a309-09ea6dd71946
   cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
+  worker_metadata: {"labels": {"role":"ceph-osd-node"}}
+  workers_flavor: 'system.compact.openstack.control.ephemeral'
+  hardware_metadata: |
+    '00:00:00:00:00:00':
+      write_files:
+        - path: /usr/share/metadata/ceph.yaml
+          content: |
+            storageDevices:
+              - name: vdb
+                role: hdd
+                sizeGb: 20
+            ramGb: 8
+            cores: 2
+            # The roles will be assigned based on node labels.
+            # roles:
+            #   - mon
+            #   - mgr
+            ips:
+              - 192.168.122.101
+            crushPath: {}
diff --git a/hco/fragments/VMInstance.yaml b/hco/fragments/VMInstance.yaml
index b4115f3..15dad90 100644
--- a/hco/fragments/VMInstance.yaml
+++ b/hco/fragments/VMInstance.yaml
@@ -2,8 +2,6 @@
 
 parameters:
 
-  node_type:
-    type: string
   k8s_network:
     type: string
   k8s_subnet_id:
@@ -12,9 +10,15 @@
     type: string
   data_network:
     type: string
+  storage_frontend_network:
+    type: string
   availability_zone:
     type: string
     default: nova
+  boot_timeout:
+    type: number
+    description: Boot timeout for the instance, in seconds
+    default: 600
   image:
     type: string
     description: Name of image to use for servers
@@ -41,6 +45,15 @@
       floating_network_id: { get_param: public_net_id }
       port_id: { get_resource: k8s_network_port }
 
+  wait_handle:
+    type: OS::Heat::WaitConditionHandle
+
+  wait_condition:
+    type: OS::Heat::WaitCondition
+    properties:
+      handle: { get_resource: wait_handle }
+      timeout: { get_param: boot_timeout }
+
   vm_server:
     type: OS::Nova::Server
     properties:
@@ -50,7 +63,36 @@
       key_name: { get_param: key_name }
       networks:
         - port: { get_resource: k8s_network_port }
+        - network: { get_param : storage_frontend_network }
         - network: { get_param : data_network }
+      user_data_format: RAW
+      user_data:
+        str_replace:
+          template: |
+            #!/bin/bash
+
+            set -x
+
+            STATUS="SUCCESS"
+            REASON="The node has been successfully deployed"
+            DATA_BINARY="{\"status\": \"$STATUS\", \"reason\": \"$REASON\"}"
+            echo "Sending notification to wait condition ..."
+
+            WC_EXIT_CODE=1
+            counter=0
+            while (( ${WC_EXIT_CODE} != 0 && ${counter} < 3 )); do
+                wc_notify -k --data-binary "$DATA_BINARY" && WC_EXIT_CODE=0
+                counter=$((counter + 1))
+                sleep 5
+            done
+
+            if (( ${WC_EXIT_CODE} != 0 ))
+            then
+                echo "Cannot send notification to wait condition with a SUCCESS status"
+                exit 1
+            fi
+          params:
+            wc_notify: { get_attr: [wait_handle, curl_cli] }
 
 outputs:
   server_public_ip:
diff --git a/hco/fragments/VMInstanceCeph.yaml b/hco/fragments/VMInstanceCeph.yaml
new file mode 100644
index 0000000..5db84f8
--- /dev/null
+++ b/hco/fragments/VMInstanceCeph.yaml
@@ -0,0 +1,161 @@
+heat_template_version: queens
+
+parameters:
+
+  k8s_network:
+    type: string
+  k8s_subnet_id:
+    type: string
+  public_net_id:
+    type: string
+  data_network:
+    type: string
+  storage_frontend_network:
+    type: string
+  storage_backend_network:
+    type: string
+  availability_zone:
+    type: string
+    default: nova
+  boot_timeout:
+    type: number
+    description: Boot timeout for the instance, in seconds
+    default: 600
+  image:
+    type: string
+    description: Name of image to use for servers
+  flavor:
+    type: string
+    description: Flavor to use for servers
+  key_name:
+    type: string
+    description: Name of keypair to assign to servers
+  metadata:
+    type: json
+    default: {}
+  hardware_metadata:
+    description: The content of the lab metadata file.
+    type: string
+  user_data_config:
+    description: Part of the cloud-config that prevents mounting the drive labeled ephemeral0 to /mnt
+    type: string
+    default: |
+      #cloud-config
+      #
+      # Don't mount ephemeral0 to /mnt, which cloud-init does by default
+      mounts:
+        - [ ephemeral0, null ]
+
+resources:
+
+  k8s_network_port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_param: k8s_network }
+      port_security_enabled: false
+      fixed_ips:
+        - subnet: { get_param: k8s_subnet_id }
+
+  floating_ip_k8s_net:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network_id: { get_param: public_net_id }
+      port_id: { get_resource: k8s_network_port }
+
+  software_config:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: ungrouped
+      config:
+        str_replace:
+          template: |
+            #!/bin/bash
+
+            set -x
+
+            /usr/sbin/prepare-metadata.py --metadata-file /usr/share/metadata/lab-metadata.yaml
+
+            HW_METADATA='{}'
+            if [[ -f /usr/share/metadata/ceph.yaml && 'node_metadata' == *"ceph-osd-node"* ]]; then
+                HW_METADATA="{\"ceph\": {\"$(hostname)\": \"$(base64 -w 0 /usr/share/metadata/ceph.yaml)\"}}"
+                ceph_store_drive=$(awk '/- name: vd/ {print $3}' /usr/share/metadata/ceph.yaml)
+                if [[ -b /dev/${ceph_store_drive} ]]; then
+                    sgdisk --zap-all /dev/${ceph_store_drive}
+                fi
+            fi
+
+            STATUS="SUCCESS"
+            REASON="The node has been successfully deployed"
+            DATA_BINARY="{\"status\": \"$STATUS\", \"reason\": \"$REASON\", \"data\": $HW_METADATA}"
+            echo "Sending notification to wait condition with data: $HW_METADATA"
+
+            WC_EXIT_CODE=1
+            counter=0
+            while (( ${WC_EXIT_CODE} != 0 && ${counter} < 3 )); do
+                wc_notify -k --data-binary "$DATA_BINARY" && WC_EXIT_CODE=0
+                counter=$((counter + 1))
+                sleep 5
+            done
+
+            if (( ${WC_EXIT_CODE} != 0 ))
+            then
+                echo "Cannot send notification to wait condition with a SUCCESS status"
+                exit 1
+            fi
+          params:
+            wc_notify: { get_attr: [wait_handle, curl_cli] }
+            node_metadata: { get_param: metadata }
+
+  inject_files:
+    type: "OS::Heat::CloudConfig"
+    properties:
+      cloud_config:
+        write_files:
+          - path: /usr/sbin/prepare-metadata.py
+            owner: "root:root"
+            permissions: "0755"
+            content: {get_file: ../../de/heat-templates/scripts/prepare-metadata.py}
+          - path: /usr/share/metadata/lab-metadata.yaml
+            owner: "root:root"
+            permissions: "0644"
+            content: { get_param: hardware_metadata }
+
+  install_config_agent:
+    type: "OS::Heat::MultipartMime"
+    properties:
+      parts:
+      - config: {get_resource: software_config}
+      - config: {get_resource: inject_files}
+      - config: {get_param: user_data_config}
+
+  wait_handle:
+    type: OS::Heat::WaitConditionHandle
+
+  wait_condition:
+    type: OS::Heat::WaitCondition
+    properties:
+      handle: {get_resource: wait_handle}
+      timeout: { get_param: boot_timeout }
+
+  vm_server:
+    type: OS::Nova::Server
+    properties:
+      availability_zone: { get_param: availability_zone }
+      image: { get_param: image }
+      flavor: { get_param: flavor }
+      key_name: { get_param: key_name }
+      networks:
+        - port: { get_resource: k8s_network_port }
+        - network: { get_param : storage_frontend_network }
+        - network: { get_param : storage_backend_network }
+        - network: { get_param : data_network }
+      user_data_format: SOFTWARE_CONFIG
+      user_data: { get_resource: install_config_agent }
+
+outputs:
+  server_public_ip:
+    description: Floating IP address of server in public network
+    value: { get_attr: [ floating_ip_k8s_net, floating_ip_address ] }
+  wc_data:
+    description: Metadata from instance
+    value: { get_attr: [wait_condition, data]}
diff --git a/hco/top.yaml b/hco/top.yaml
index 59aeaeb..e8c4929 100644
--- a/hco/top.yaml
+++ b/hco/top.yaml
@@ -36,9 +36,26 @@
     type: string
     description: The CIDR of k8s network
     default: '10.11.0.0/24'
+  storage_backend_network_cidr:
+    type: string
+    default: '10.12.0.0/24'
+  storage_frontend_network_cidr:
+    type: string
+    default: '10.12.1.0/24'
   dns_nameservers:
     type: json
-    default: []
+    default: ['172.18.224.6', '172.18.176.6']
+  hardware_metadata:
+    description: The content of the lab metadata file.
+    default: ''
+    type: string
+  worker_metadata:
+    type: json
+    default: {}
+  boot_timeout:
+    type: number
+    description: Boot timeout for the instance, in seconds
+    default: 600
 
 resources:
 
@@ -85,6 +102,26 @@
       cidr: { get_param: data_network_cidr }
       gateway_ip: ~
 
+  storage_backend_network:
+    type: OS::Neutron::Net
+  storage_backend_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network: { get_resource: storage_backend_network }
+      enable_dhcp: false
+      cidr: { get_param: storage_backend_network_cidr }
+      gateway_ip: ~
+
+  storage_frontend_network:
+    type: OS::Neutron::Net
+  storage_frontend_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network: { get_resource: storage_frontend_network }
+      enable_dhcp: false
+      cidr: { get_param: storage_frontend_network_cidr }
+      gateway_ip: ~
+
   masters:
     type: OS::Heat::ResourceGroup
     depends_on:
@@ -96,15 +133,16 @@
       resource_def:
         type: VMInstances
         properties:
-          node_type: "controller"
           k8s_network: { get_resource: k8s_network }
           k8s_subnet_id: { get_resource: k8s_subnet }
           public_net_id: { get_param: public_net_id }
+          storage_frontend_network: { get_resource: storage_frontend_network }
           data_network: { get_resource: data_network }
           availability_zone: { get_param: availability_zone }
           image: { get_param: image }
           flavor: { get_param: masters_flavor }
           key_name: { get_attr: [keypair_name, value] }
+          boot_timeout: { get_param: boot_timeout }
 
   workers:
     type: OS::Heat::ResourceGroup
@@ -115,17 +153,21 @@
     properties:
       count: { get_param: workers_size }
       resource_def:
-        type: VMInstances
+        type: VMInstancesCeph
         properties:
-          node_type: "worker"
           k8s_network: { get_resource: k8s_network }
           k8s_subnet_id: { get_resource: k8s_subnet }
           public_net_id: { get_param: public_net_id }
+          storage_frontend_network: { get_resource: storage_frontend_network }
+          storage_backend_network: { get_resource: storage_backend_network }
           data_network: { get_resource: data_network }
           availability_zone: { get_param: availability_zone }
           image: { get_param: image }
           flavor: { get_param: workers_flavor }
           key_name: { get_attr: [keypair_name, value] }
+          metadata: { get_param: worker_metadata }
+          hardware_metadata: { get_param: hardware_metadata }
+          boot_timeout: { get_param: boot_timeout }
 
 outputs:
   masters_ips:
@@ -134,3 +176,12 @@
   workers_ips:
     description: Public IP addresses of the deployed worker instances
     value: { get_attr: [workers, server_public_ip] }
+  storage_frontend_network_cidr:
+    description: Storage frontend network, used as clientNet in the Ceph CR
+    value: { get_param: storage_frontend_network_cidr }
+  storage_backend_network_cidr:
+    description: Storage backend network, used as clusterNet in the Ceph CR
+    value: { get_param: storage_backend_network_cidr }
+  workers_wc_data:
+    description: Metadata from workers
+    value: { get_attr: [workers, wc_data] }