# Heat template: provisions a Kubernetes node with k8s/storage/data networks,
# a floating IP, boot-time metadata preparation and a deploy wait condition.
heat_template_version: queens

parameters:

  # Primary Kubernetes network and its IPv4/IPv6 subnets.
  k8s_network:
    type: string
  k8s_subnet_id:
    type: string
  k8s_subnet_ipv6_id:
    type: string
  # External network used to allocate the server's floating IP.
  public_net_id:
    type: string
  data_network:
    type: string
  storage_frontend_network:
    type: string
  storage_backend_network:
    type: string
  # Number of extra Cinder volumes attached per node (0 = none).
  storage_volumes_per_node:
    type: number
    default: 0
  availability_zone:
    type: string
    default: nova
  boot_timeout:
    type: number
    description: Boot timeout for instance
    default: 600
  image:
    type: string
    description: Name of image to use for servers
  flavor:
    type: string
    description: Flavor to use for servers
  key_name:
    type: string
    description: Name of keypair to assign to servers
  # Arbitrary JSON metadata; substituted into the boot script (node_metadata).
  metadata:
    type: json
    default: {}
  hardware_metadata:
    description: The content of lab metadata.
    type: string
  user_data_config:
    description: This is part of cloud-config which denies to mount drive with label ephemeral0 to /mnt
    type: string
    default: |
      #cloud-config
      #
      # Don't mount ephemeral0 to /mnt as it's by default
      mounts:
        - [ ephemeral0, null ]

resources:

  # Primary port on the Kubernetes network, carrying both an IPv4 and an
  # IPv6 fixed IP. Port security is disabled to allow CNI/overlay traffic.
  k8s_network_port:
    type: OS::Neutron::Port
    properties:
      network: { get_param: k8s_network }
      port_security_enabled: false
      fixed_ips:
        - subnet: { get_param: k8s_subnet_id }
        - subnet: { get_param: k8s_subnet_ipv6_id }

65 floating_ip_k8s_net:
66 type: OS::Neutron::FloatingIP
67 properties:
68 floating_network_id: { get_param: public_net_id }
69 port_id: { get_resource: k8s_network_port }
70
71 software_config:
72 type: OS::Heat::SoftwareConfig
73 properties:
74 group: ungrouped
75 config:
76 str_replace:
77 template: |
78 #!/bin/bash
79
80 set -x
81
82 /usr/sbin/prepare-metadata.py --metadata-file /usr/share/metadata/lab-metadata.yaml
83
84 HW_METADATA='{}'
85 if [[ -f /usr/share/metadata/ceph.yaml && 'node_metadata' == *"ceph-osd-node"* ]]; then
86 HW_METADATA="{\"ceph\": {\"$(hostname)\": \"$(base64 -w 0 /usr/share/metadata/ceph.yaml)\"}}"
87 ceph_store_drive=$(cat /usr/share/metadata/ceph.yaml | egrep '\- name\: vd?' | awk '{print $3}')
88 if [[ -b /dev/${ceph_store_drive} ]]; then
89 sgdisk --zap-all /dev/${ceph_store_drive}
90 fi
91 fi
Ann Taraday56283e52024-10-25 15:36:20 +020092
93 apt install nfs-common -y
Anton Samoylov17e7c032024-10-14 23:55:18 +040094
95 STATUS="SUCCESS"
96 REASON="The node has been successfully deployed"
97 DATA_BINARY="{\"status\": \"$STATUS\", \"reason\": \"$REASON\", \"data\": $HW_METADATA}"
98 echo "Sending notification to wait condition with data: $HW_METADATA"
99
100 WC_EXIT_CODE=1
101 counter=0
102 while (( ${WC_EXIT_CODE} != 0 && ${counter} < 3 )); do
103 wc_notify -k --data-binary "$DATA_BINARY" && WC_EXIT_CODE=0
104 counter=$((counter + 1))
105 sleep 5
106 done
107
108 if (( ${WC_EXIT_CODE} !=0 ))
109 then
110 echo "Cannot send notification to wait condition with a SUCCESS status"
111 exit 1
112 fi
113 params:
114 wc_notify: { get_attr: [wait_handle, curl_cli] }
115 node_metadata: { get_param: metadata }
116
117 inject_files:
118 type: "OS::Heat::CloudConfig"
119 properties:
120 cloud_config:
121 write_files:
122 - path: /usr/sbin/prepare-metadata.py
123 owner: "root:root"
124 permissions: "0755"
125 content: {get_file: ../../de/heat-templates/scripts/prepare-metadata.py}
126 - path: /usr/share/metadata/lab-metadata.yaml
127 owner: "root:root"
128 permissions: "0644"
129 content: { get_param: hardware_metadata}
Anton Samoylov9b7c6802025-01-20 11:20:35 +0400130 - path: /etc/k0s/containerd.d/cdi.toml
131 owner: "root:root"
132 permissions: "0644"
133 content: {get_file: ../configs/cdi.toml}
Anton Samoylov17e7c032024-10-14 23:55:18 +0400134
135 install_config_agent:
136 type: "OS::Heat::MultipartMime"
137 properties:
138 parts:
139 - config: {get_resource: software_config}
140 - config: {get_resource: inject_files}
141 - config: {get_param: user_data_config}
142
143 wait_handle:
144 type: OS::Heat::WaitConditionHandle
145
146 wait_condition:
147 type: OS::Heat::WaitCondition
148 properties:
149 handle: {get_resource: wait_handle}
150 timeout: { get_param: boot_timeout }
151
Anton Samoylov72279102025-03-05 18:52:39 +0400152 server:
Anton Samoylov17e7c032024-10-14 23:55:18 +0400153 type: OS::Nova::Server
154 properties:
155 availability_zone: { get_param: availability_zone }
156 image: { get_param: image }
157 flavor: { get_param: flavor }
158 key_name: { get_param: key_name }
159 networks:
160 - port: { get_resource: k8s_network_port }
161 - network: { get_param : storage_frontend_network }
162 - network: { get_param : storage_backend_network }
163 - network: { get_param : data_network }
164 user_data_format: SOFTWARE_CONFIG
165 user_data: { get_resource: install_config_agent}
166
Denis Egorenko9dd84da2025-09-23 15:05:32 +0400167 extra_volumes:
168 type: OS::Heat::ResourceGroup
169 properties:
170 count: { get_param: storage_volumes_per_node }
171 resource_def:
172 type: ./CinderVolumeVM.yaml
173 properties:
174 instance_id: { get_resource: server }
175
Anton Samoylov17e7c032024-10-14 23:55:18 +0400176outputs:
177 server_public_ip:
178 description: Floating IP address of server in public network
179 value: { get_attr: [ floating_ip_k8s_net, floating_ip_address ] }
180 wc_data:
181 description: Metadata from instance
182 value: { get_attr: [wait_condition, data]}