[ceph][kubevirt] Allow deploying a configurable number of volumes for Ceph envs
Add a new env file for Ceph tests and a reusable Heat fragment that creates and attaches extra Cinder volumes to instances.
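
The number of extra Cinder volumes attached to each node is controlled
by the new storage_volumes_per_node parameter in top.yaml (default 0,
so existing envs are unaffected). It can be overridden from any env
file, for example (value illustrative):

    parameters:
      storage_volumes_per_node: 2
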
Related-Prod: PRODX-54501
Change-Id: I1e6983ac8cd4b1aee332d9f7e11f7a53bf7a479c
diff --git a/hco/env/ctrl3-wrkr4-ceph.yaml b/hco/env/ctrl3-wrkr4-ceph.yaml
new file mode 100644
index 0000000..eba05be
--- /dev/null
+++ b/hco/env/ctrl3-wrkr4-ceph.yaml
@@ -0,0 +1,31 @@
+resource_registry:
+ "VMInstances": ../fragments/VMInstance.yaml
+ "VMInstancesCeph": ../fragments/VMInstanceCeph.yaml
+
+parameters:
+ controllers_size: 3
+ workers_size: 4
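+ # Extra Cinder volumes per node; expected to show up in guests as vdb, vdc, ...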
+ storage_volumes_per_node: 3
+ image: ubuntu-24.04-server-cloudimg-amd64-20250805
+ public_net_id: public
+ cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
+ worker_metadata: {"labels": {"role":"ceph-osd-node"}}
+ hardware_metadata: |
+ '00:00:00:00:00:00':
+ write_files:
+ - path: /usr/share/metadata/ceph.yaml
+ content: |
+ storageDevices:
+ - name: vdb
+ role: hdd
+ sizeGb: 30
+ ramGb: 8
+ cores: 2
+ # The roles will be assigned based on node labels.
+ # roles:
+ # - mon
+ # - mgr
+ ips:
+ - 192.168.122.101
+ crushPath: {}
diff --git a/hco/fragments/CinderVolumeVM.yaml b/hco/fragments/CinderVolumeVM.yaml
new file mode 100644
index 0000000..b0726f4
--- /dev/null
+++ b/hco/fragments/CinderVolumeVM.yaml
@@ -0,0 +1,32 @@
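+# One Cinder volume attached to a given server; instantiated once per
+# volume by the extra_volumes ResourceGroup in VMInstanceCeph.yaml.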
+heat_template_version: queens
+
+parameters:
+ volume_size:
+ type: number
+ description: Size of the volume to attach to the instance, in GB
+ default: 30
+
+ volume_type:
+ type: string
+ description: The volume type, which maps to a storage backend, if any.
+ default: standard-iops
+
+ instance_id:
+ type: string
+ description: ID of the server to attach the volume to
+
+resources:
+ volume:
+ type: OS::Cinder::Volume
+ properties:
+ volume_type: { get_param: volume_type }
+ size: { get_param: volume_size }
+ description: Volume for stack
+
+ volume_attachment:
+ type: OS::Cinder::VolumeAttachment
+ properties:
+ volume_id: { get_resource: volume }
+ instance_uuid: { get_param: instance_id }
diff --git a/hco/fragments/VMInstanceCeph.yaml b/hco/fragments/VMInstanceCeph.yaml
index ad54722..642fba2 100644
--- a/hco/fragments/VMInstanceCeph.yaml
+++ b/hco/fragments/VMInstanceCeph.yaml
@@ -16,6 +16,10 @@
type: string
storage_backend_network:
type: string
+ storage_volumes_per_node:
+ type: number
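+ description: Number of extra Cinder volumes to create and attach to this node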
+ default: 0
availability_zone:
type: string
default: nova
@@ -161,6 +164,16 @@
user_data_format: SOFTWARE_CONFIG
user_data: { get_resource: install_config_agent}
 
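+ # One nested CinderVolumeVM.yaml stack per requested extra volume; volume size and type use the fragment defaults.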
+ extra_volumes:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: { get_param: storage_volumes_per_node }
+ resource_def:
+ type: ./CinderVolumeVM.yaml
+ properties:
+ instance_id: { get_resource: server }
+
outputs:
server_public_ip:
description: Floating IP address of server in public network
diff --git a/hco/top.yaml b/hco/top.yaml
index 2a7bd37..272d90f 100644
--- a/hco/top.yaml
+++ b/hco/top.yaml
@@ -50,6 +50,10 @@
storage_frontend_network_cidr:
type: string
default: '10.12.1.0/24'
+ storage_volumes_per_node:
+ type: number
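+ description: Number of extra Cinder volumes to attach to each node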
+ default: 0
dns_nameservers:
type: json
default: ['172.18.224.6', '172.18.176.6']
@@ -182,6 +185,7 @@
public_net_id: { get_param: public_net_id }
storage_frontend_network: { get_resource: storage_frontend_network }
storage_backend_network: { get_resource: storage_backend_network }
+ storage_volumes_per_node: { get_param: storage_volumes_per_node }
data_network: { get_resource: data_network }
availability_zone: { get_param: availability_zone }
image: { get_param: image }