[KUBEV] Extend template to support hybrid lab deployments.
Add VM Compute resource to deploy virtual nodes using virtual BMC
and connect the physical network (PXE) to instances.
Related: KUBEV-414
Change-Id: I7b5409ae1b4336f3ab78ecb63b435a6036200bd9
diff --git a/hco/env/system-phys-2488.yaml b/hco/env/system-phys-2488.yaml
new file mode 100644
index 0000000..dfd5d58
--- /dev/null
+++ b/hco/env/system-phys-2488.yaml
@@ -0,0 +1,32 @@
+resource_registry:
+ "VMInstances": ../fragments/VMInstance.yaml
+ "VMInstancesCeph": ../fragments/VMInstanceCeph.yaml
+
+parameters:
+ controllers_size: 1
+ workers_size: 1
+ image: ubuntu-24.04-server-cloudimg-amd64-20250805
+ public_net_id: public
+ cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
+ worker_metadata: {"labels": {"role":"ceph-osd-node"}}
+ hardware_metadata: |
+ '00:00:00:00:00:00':
+ write_files:
+ - path: /usr/share/metadata/ceph.yaml
+ content: |
+ storageDevices:
+ - name: vdb
+ role: hdd
+ sizeGb: 20
+ ramGb: 8
+ cores: 2
+ # The roles will be assigned based on node labels.
+ # roles:
+ # - mon
+ # - mgr
+ ips:
+ - 192.168.122.101
+ crushPath: {}
+ hybrid_lab: true
+ pxe_network: system-phys-2488
+ pxe_subnet: system-phys-2488-subnet
diff --git a/hco/fragments/VMCompute.yaml b/hco/fragments/VMCompute.yaml
new file mode 100644
index 0000000..e8e25ab
--- /dev/null
+++ b/hco/fragments/VMCompute.yaml
@@ -0,0 +1,169 @@
+heat_template_version: queens
+
+parameters:
+
+ k8s_network:
+ type: string
+ k8s_subnet_id:
+ type: string
+ public_net_id:
+ type: string
+ pxe_network:
+ type: string
+ pxe_subnet:
+ type: string
+ availability_zone:
+ type: string
+ default: nova
+ boot_timeout:
+ type: number
+ description: Boot timeout for instance
+ default: 450
+ image:
+ type: string
+ description: Name of image to use for servers
+ flavor:
+ type: string
+ description: Flavor to use for servers
+ key_name:
+ type: string
+ description: Name of keypair to assign to servers
+
+resources:
+
+ pxe_network_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: pxe_network }
+ port_security_enabled: false
+ fixed_ips:
+ - subnet: { get_param: pxe_subnet }
+
+ ip_addr_pxe:
+ type: OS::Heat::Value
+ properties:
+ type: string
+ value: { get_attr: [ pxe_network_port, fixed_ips, 0, ip_address ] }
+
+ ip_mask_pxe:
+ type: OS::Heat::Value
+ properties:
+ type: string
+ value: { str_split: [ '/', { get_attr: [ pxe_network_port, subnets, 0, cidr ] }, 1 ] }
+
+ k8s_network_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: k8s_network }
+ port_security_enabled: false
+ fixed_ips:
+ - subnet: { get_param: k8s_subnet_id }
+
+ floating_ip_k8s_net:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network_id: { get_param: public_net_id }
+ port_id: { get_resource: k8s_network_port }
+
+ wait_handle:
+ type: OS::Heat::WaitConditionHandle
+
+ wait_condition:
+ type: OS::Heat::WaitCondition
+ properties:
+ handle: { get_resource: wait_handle }
+ timeout: { get_param: boot_timeout }
+
+ server_init:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ password: 'r00tme'
+ chpasswd:
+ expire: false
+ ssh_pwauth: true
+ packages:
+ - bridge-utils
+ - cpu-checker
+ - libvirt-clients
+ - libvirt-daemon
+ - libvirt-daemon-system
+ - qemu-kvm
+ - virt-manager
+ - virtinst
+ - python3-virtualbmc
+ - ipmitool
+ write_files:
+ - path: /etc/systemd/system/virtualbmc.service
+ content: |
+ [Unit]
+ Description=Virtual BMC Service
+ After=network.target libvirtd.service
+ [Service]
+ Type=simple
+ ExecStart=/usr/bin/vbmcd --foreground
+ ExecStop=/bin/kill -HUP $MAINPID
+ User=root
+ Group=root
+ [Install]
+ WantedBy=multi-user.target
+ - path: /etc/netplan/99-custom-bridge.yaml
+ content:
+ str_replace:
+ template: |
+ network:
+ version: 2
+ bridges:
+ br_pxe:
+ interfaces: [ens4]
+ addresses: [ip_addr/ip_mask]
+ params:
+ ip_addr: { get_attr: [ ip_addr_pxe, value ] }
+ ip_mask: { get_attr: [ ip_mask_pxe, value ] }
+ runcmd:
+ - str_replace:
+ template: |
+ #!/bin/bash
+ set +x
+ netplan apply
+ sudo ip addr flush dev ens4
+ systemctl enable --now virtualbmc.service
+ # Run instances with vBMC
+ virt-install --name child-control-1 --ram 8192 --vcpus 4 --disk size=20,bus=scsi --os-variant generic --network bridge=br_pxe,model=virtio --graphics vnc,listen=0.0.0.0 --boot network --pxe --noautoconsole
+ virt-install --name child-control-2 --ram 8192 --vcpus 4 --disk size=20,bus=scsi --os-variant generic --network bridge=br_pxe,model=virtio --graphics vnc,listen=0.0.0.0 --boot network --pxe --noautoconsole
+ virt-install --name child-control-3 --ram 8192 --vcpus 4 --disk size=20,bus=scsi --os-variant generic --network bridge=br_pxe,model=virtio --graphics vnc,listen=0.0.0.0 --boot network --pxe --noautoconsole
+ vbmc add child-control-1 --port 6231 --username engineer --password password && vbmc start child-control-1
+ vbmc add child-control-2 --port 6232 --username engineer --password password && vbmc start child-control-2
+ vbmc add child-control-3 --port 6233 --username engineer --password password && vbmc start child-control-3
+ # Collect VM data
+ mac1=$(virsh domiflist child-control-1 | grep 'br_pxe' | awk '{print $5}')
+ mac2=$(virsh domiflist child-control-2 | grep 'br_pxe' | awk '{print $5}')
+ mac3=$(virsh domiflist child-control-3 | grep 'br_pxe' | awk '{print $5}')
+ # Simple success signal
+ wc_notify --data-binary '{"status": "SUCCESS", "data": {"vnodes": [{"mac": "'${mac1}'", "port": "6231"}, {"mac": "'${mac2}'", "port": "6232"}, {"mac": "'${mac3}'", "port": "6233"}]}}'
+ params:
+ wc_notify: { get_attr: [ wait_handle, curl_cli ] }
+
+ server:
+ type: OS::Nova::Server
+ properties:
+ availability_zone: { get_param: availability_zone }
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ key_name: { get_param: key_name }
+ networks:
+ - port: { get_resource: k8s_network_port }
+ - port: { get_resource: pxe_network_port }
+ user_data_format: RAW
+ user_data: { get_resource: server_init }
+
+outputs:
+ server_public_ip:
+ description: Floating IP address of server in public network
+ value: { get_attr: [ floating_ip_k8s_net, floating_ip_address ] }
+ vbmc_ip:
+    description: IP address of the interface in the PXE network (virtual BMC)
+ value: { get_attr: [ ip_addr_pxe, value ] }
+ vnodes_data:
+ description: Virtual nodes data (mac addresses and vbmc ports)
+ value: { get_attr: [ wait_condition, data ] }
diff --git a/hco/fragments/VMInstanceCeph.yaml b/hco/fragments/VMInstanceCeph.yaml
index 642fba2..61cbd9e 100644
--- a/hco/fragments/VMInstanceCeph.yaml
+++ b/hco/fragments/VMInstanceCeph.yaml
@@ -50,9 +50,32 @@
# Don't mount ephemeral0 to /mnt as it's by default
mounts:
- [ ephemeral0, null ]
+ hybrid_lab:
+ type: boolean
+ default: false
+ pxe_network:
+ type: string
+ default: ''
+ pxe_subnet:
+ type: string
+ default: ''
+
+conditions:
+
+ connect_pxe_network:
+ get_param: hybrid_lab
resources:
+ pxe_network_port:
+ type: OS::Neutron::Port
+ condition: connect_pxe_network
+ properties:
+ network: { get_param: pxe_network }
+ port_security_enabled: false
+ fixed_ips:
+ - subnet: { get_param: pxe_subnet }
+
k8s_network_port:
type: OS::Neutron::Port
properties:
@@ -157,10 +180,18 @@
flavor: { get_param: flavor }
key_name: { get_param: key_name }
networks:
- - port: { get_resource: k8s_network_port }
- - network: { get_param : storage_frontend_network }
- - network: { get_param : storage_backend_network }
- - network: { get_param : data_network }
+ if:
+ - connect_pxe_network
+ - - port: { get_resource: k8s_network_port }
+ - network: { get_param: storage_frontend_network }
+ - network: { get_param: storage_backend_network }
+ - network: { get_param: data_network }
+ - port: { get_resource: pxe_network_port }
+ - - port: { get_resource: k8s_network_port }
+ - network: { get_param: storage_frontend_network }
+ - network: { get_param: storage_backend_network }
+ - network: { get_param: data_network }
+
user_data_format: SOFTWARE_CONFIG
user_data: { get_resource: install_config_agent}
diff --git a/hco/top.yaml b/hco/top.yaml
index 272d90f..7e5f4af 100644
--- a/hco/top.yaml
+++ b/hco/top.yaml
@@ -67,6 +67,26 @@
type: number
description: Boot timeout for instance
default: 600
+ # Hybrid lab parameters
+ hybrid_lab:
+ type: boolean
+ description: Deploy VM Compute for hybrid deployment (BM + virtual nodes).
+ default: false
+ vm_compute_flavor:
+ type: string
+ default: 'system.golden.openstack.control'
+ pxe_network:
+ type: string
+      description: The name of the PXE network
+ default: ''
+ pxe_subnet:
+ type: string
+ default: ''
+
+conditions:
+
+ deploy_vm_compute:
+ get_param: hybrid_lab
resources:
@@ -194,6 +214,26 @@
metadata: { get_param: worker_metadata }
hardware_metadata: { get_param: hardware_metadata}
boot_timeout: { get_param: boot_timeout }
+ hybrid_lab: { get_param: hybrid_lab }
+ pxe_network: { get_param: pxe_network }
+ pxe_subnet: { get_param: pxe_subnet }
+
+ vm_compute:
+ type: ./fragments/VMCompute.yaml
+ condition: deploy_vm_compute
+ depends_on:
+ - k8s_network
+ - data_network
+ - public_router_iface
+ properties:
+ k8s_network: { get_resource: k8s_network }
+ k8s_subnet_id: { get_resource: k8s_subnet }
+ public_net_id: { get_param: public_net_id }
+ pxe_network: { get_param: pxe_network }
+ pxe_subnet: { get_param: pxe_subnet }
+ image: { get_param: image }
+ flavor: { get_param: vm_compute_flavor }
+ key_name: { get_attr: [ keypair_name, value ] }
outputs:
masters_ips:
@@ -214,3 +254,15 @@
public_router_gw_ipv6:
description: Public gateway IPv6 address (used for kubevirt tests)
value: { get_param: k8s_network_ipv6_gw_ip }
+ vm_compute_ip:
+ condition: deploy_vm_compute
+ description: Public IP address of the deployed compute instance
+ value: { get_attr: [ vm_compute, server_public_ip ] }
+ vbmc_ip:
+ condition: deploy_vm_compute
+    description: IP address of the interface in the PXE network (used for virtual BMC)
+ value: { get_attr: [ vm_compute, vbmc_ip ] }
+ vnodes_data:
+ condition: deploy_vm_compute
+ description: Virtual nodes data (mac addresses and vbmc ports)
+ value: { get_attr: [ vm_compute, vnodes_data ] }