Add wait condition for HCO env

- added a wait handle and wait condition to the HCO env templates
- added hardware metadata reporting for Ceph nodes
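
The wait condition holds the stack in CREATE_IN_PROGRESS until each
instance signals boot completion through the wait handle, or until
boot_timeout (seconds) expires. In the boot scripts, wc_notify is
substituted with the handle's curl_cli attribute, which expands to
roughly the following (a sketch; the exact form depends on the Heat
version):

    curl -i -X POST -H 'X-Auth-Token: <token>' \
        -H 'Content-Type: application/json' <signal URL>

Ceph nodes additionally report their generated ceph.yaml in the
signal's "data" field, which parent stacks can read through the
wc_data output.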
Related-PROD: KUBV-73
Change-Id: Ib8feca2eb1243586e5b17c869eed23f3a1ab5ccd
diff --git a/hco/fragments/VMInstance.yaml b/hco/fragments/VMInstance.yaml
index b4115f3..15dad90 100644
--- a/hco/fragments/VMInstance.yaml
+++ b/hco/fragments/VMInstance.yaml
@@ -2,8 +2,6 @@
parameters:
- node_type:
- type: string
k8s_network:
type: string
k8s_subnet_id:
@@ -12,9 +10,15 @@
type: string
data_network:
type: string
+ storage_frontend_network:
+ type: string
availability_zone:
type: string
default: nova
+ boot_timeout:
+ type: number
+ description: Boot timeout for the instance, in seconds
+ default: 600
image:
type: string
description: Name of image to use for servers
@@ -41,6 +45,15 @@
floating_network_id: { get_param: public_net_id }
port_id: { get_resource: k8s_network_port }
+ wait_handle:
+ type: OS::Heat::WaitConditionHandle
+
+ wait_condition:
+ type: OS::Heat::WaitCondition
+ properties:
+ handle: { get_resource: wait_handle }
+ timeout: { get_param: boot_timeout }
+
vm_server:
type: OS::Nova::Server
properties:
@@ -50,7 +63,36 @@
key_name: { get_param: key_name }
networks:
- port: { get_resource: k8s_network_port }
+ - network: { get_param : storage_frontend_network }
- network: { get_param : data_network }
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/bash
+
+ set -x
+
+ STATUS="SUCCESS"
+ REASON="The node has been successfully deployed"
+ DATA_BINARY="{\"status\": \"$STATUS\", \"reason\": \"$REASON\"}"
+ echo "Sending notification to wait condition ..."
+
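+ # wc_notify is substituted via str_replace with the wait handle's
+ # curl_cli attribute, a presigned curl command against the Heat API.
+ # Retry the signal a few times in case the API is briefly unreachable.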
+ WC_EXIT_CODE=1
+ counter=0
+ while (( ${WC_EXIT_CODE} != 0 && ${counter} < 3 )); do
+ wc_notify -k --data-binary "$DATA_BINARY" && WC_EXIT_CODE=0
+ counter=$((counter + 1))
+ sleep 5
+ done
+
+ if (( ${WC_EXIT_CODE} != 0 )); then
+ echo "Cannot send notification to wait condition with a SUCCESS status"
+ exit 1
+ fi
+ params:
+ wc_notify: { get_attr: [wait_handle, curl_cli] }
outputs:
server_public_ip:
diff --git a/hco/fragments/VMInstanceCeph.yaml b/hco/fragments/VMInstanceCeph.yaml
new file mode 100644
index 0000000..5db84f8
--- /dev/null
+++ b/hco/fragments/VMInstanceCeph.yaml
@@ -0,0 +1,161 @@
+heat_template_version: queens
+
+parameters:
+
+ k8s_network:
+ type: string
+ k8s_subnet_id:
+ type: string
+ public_net_id:
+ type: string
+ data_network:
+ type: string
+ storage_frontend_network:
+ type: string
+ storage_backend_network:
+ type: string
+ availability_zone:
+ type: string
+ default: nova
+ boot_timeout:
+ type: number
+ description: Boot timeout for the instance, in seconds
+ default: 600
+ image:
+ type: string
+ description: Name of image to use for servers
+ flavor:
+ type: string
+ description: Flavor to use for servers
+ key_name:
+ type: string
+ description: Name of keypair to assign to servers
+ metadata:
+ type: json
+ default: {}
+ hardware_metadata:
+ description: The content of lab metadata.
+ type: string
+ user_data_config:
+ description: The part of cloud-config that prevents cloud-init from mounting the drive labeled ephemeral0 to /mnt
+ type: string
+ default: |
+ #cloud-config
+ #
+ # Don't mount ephemeral0 to /mnt, which cloud-init does by default
+ mounts:
+ - [ ephemeral0, null ]
+
+resources:
+
+ k8s_network_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: k8s_network }
+ port_security_enabled: false
+ fixed_ips:
+ - subnet: { get_param: k8s_subnet_id }
+
+ floating_ip_k8s_net:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network_id: { get_param: public_net_id }
+ port_id: { get_resource: k8s_network_port }
+
+ software_config:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: ungrouped
+ config:
+ str_replace:
+ template: |
+ #!/bin/bash
+
+ set -x
+
+ /usr/sbin/prepare-metadata.py --metadata-file /usr/share/metadata/lab-metadata.yaml
+
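+ # 'node_metadata' is substituted via str_replace with the JSON
+ # 'metadata' parameter; only nodes labelled ceph-osd-node report
+ # ceph.yaml back through the wait condition.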
+ HW_METADATA='{}'
+ if [[ -f /usr/share/metadata/ceph.yaml && 'node_metadata' == *"ceph-osd-node"* ]]; then
+ HW_METADATA="{\"ceph\": {\"$(hostname)\": \"$(base64 -w 0 /usr/share/metadata/ceph.yaml)\"}}"
+ ceph_store_drive=$(grep -E '\- name\: vd?' /usr/share/metadata/ceph.yaml | awk '{print $3}')
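+ # Zap the partition table so the drive comes up clean for OSD provisioning.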
+ if [[ -b /dev/${ceph_store_drive} ]]; then
+ sgdisk --zap-all /dev/${ceph_store_drive}
+ fi
+ fi
+
+ STATUS="SUCCESS"
+ REASON="The node has been successfully deployed"
+ DATA_BINARY="{\"status\": \"$STATUS\", \"reason\": \"$REASON\", \"data\": $HW_METADATA}"
+ echo "Sending notification to wait condition with data: $HW_METADATA"
+
+ WC_EXIT_CODE=1
+ counter=0
+ while (( ${WC_EXIT_CODE} != 0 && ${counter} < 3 )); do
+ wc_notify -k --data-binary "$DATA_BINARY" && WC_EXIT_CODE=0
+ counter=$((counter + 1))
+ sleep 5
+ done
+
+ if (( ${WC_EXIT_CODE} != 0 )); then
+ echo "Cannot send notification to wait condition with a SUCCESS status"
+ exit 1
+ fi
+ params:
+ wc_notify: { get_attr: [wait_handle, curl_cli] }
+ node_metadata: { get_param: metadata }
+
+ inject_files:
+ type: "OS::Heat::CloudConfig"
+ properties:
+ cloud_config:
+ write_files:
+ - path: /usr/sbin/prepare-metadata.py
+ owner: "root:root"
+ permissions: "0755"
+ content: {get_file: ../../de/heat-templates/scripts/prepare-metadata.py}
+ - path: /usr/share/metadata/lab-metadata.yaml
+ owner: "root:root"
+ permissions: "0644"
+ content: { get_param: hardware_metadata}
+
+ install_config_agent:
+ type: "OS::Heat::MultipartMime"
+ properties:
+ parts:
+ - config: {get_resource: software_config}
+ - config: {get_resource: inject_files}
+ - config: {get_param: user_data_config}
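+ # MultipartMime merges the boot script, the injected files, and the
+ # user-provided cloud-config into a single multipart user-data
+ # document that cloud-init processes on first boot.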
+
+ wait_handle:
+ type: OS::Heat::WaitConditionHandle
+
+ wait_condition:
+ type: OS::Heat::WaitCondition
+ properties:
+ handle: {get_resource: wait_handle}
+ timeout: { get_param: boot_timeout }
+
+ vm_server:
+ type: OS::Nova::Server
+ properties:
+ availability_zone: { get_param: availability_zone }
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ key_name: { get_param: key_name }
+ networks:
+ - port: { get_resource: k8s_network_port }
+ - network: { get_param : storage_frontend_network }
+ - network: { get_param : storage_backend_network }
+ - network: { get_param : data_network }
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: install_config_agent}
+
+outputs:
+ server_public_ip:
+ description: Floating IP address of server in public network
+ value: { get_attr: [ floating_ip_k8s_net, floating_ip_address ] }
+ wc_data:
+ description: Metadata from instance
+ value: { get_attr: [wait_condition, data]}
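+ # A parent stack consumes these outputs as attributes of the nested
+ # stack resource, e.g. (hypothetical resource name):
+ #   { get_attr: [ceph_node, wc_data] }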