[KUBEV] Extend template to support hybrid lab deployments.
Add a VMCompute resource that deploys virtual nodes managed through
virtual BMC and connects the physical (PXE) network to the instances.
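
A parent template can consume this fragment roughly as follows (sketch;
the resource name, relative path and parameter wiring are illustrative
and depend on the calling template):

  vm_compute:
    type: ../fragments/VMCompute.yaml
    properties:
      k8s_network: { get_param: k8s_network }
      k8s_subnet_id: { get_param: k8s_subnet_id }
      public_net_id: { get_param: public_net_id }
      pxe_network: { get_param: pxe_network }
      pxe_subnet: { get_param: pxe_subnet }
      image: { get_param: image }
      flavor: { get_param: flavor }
      key_name: { get_param: key_name }
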
Related: KUBEV-414
Change-Id: I7b5409ae1b4336f3ab78ecb63b435a6036200bd9
diff --git a/hco/fragments/VMCompute.yaml b/hco/fragments/VMCompute.yaml
new file mode 100644
index 0000000..e8e25ab
--- /dev/null
+++ b/hco/fragments/VMCompute.yaml
@@ -0,0 +1,188 @@
+heat_template_version: queens
+
+parameters:
+
+ k8s_network:
+ type: string
+ k8s_subnet_id:
+ type: string
+ public_net_id:
+ type: string
+ pxe_network:
+ type: string
+ pxe_subnet:
+ type: string
+ availability_zone:
+ type: string
+ default: nova
+ boot_timeout:
+ type: number
+ description: Boot timeout for instance
+ default: 450
+ image:
+ type: string
+ description: Name of image to use for servers
+ flavor:
+ type: string
+ description: Flavor to use for servers
+ key_name:
+ type: string
+ description: Name of keypair to assign to servers
+
+resources:
+
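+ # Port on the PXE provisioning network. Port security is disabled so frames
+ # from the bridged child VMs (carrying their own MACs) can pass through it.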
+ pxe_network_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: pxe_network }
+ port_security_enabled: false
+ fixed_ips:
+ - subnet: { get_param: pxe_subnet }
+
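+ # Capture the PXE port's fixed IP and the subnet prefix length; both feed the
+ # netplan bridge config below, and the address is also exposed as vbmc_ip.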
+ ip_addr_pxe:
+ type: OS::Heat::Value
+ properties:
+ type: string
+ value: { get_attr: [ pxe_network_port, fixed_ips, 0, ip_address ] }
+
+ ip_mask_pxe:
+ type: OS::Heat::Value
+ properties:
+ type: string
+ value: { str_split: [ '/', { get_attr: [ pxe_network_port, subnets, 0, cidr ] }, 1 ] }
+
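+ # Port on the k8s network; a floating IP from the public network is attached
+ # to it so the hypervisor host is reachable from outside.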
+ k8s_network_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: k8s_network }
+ port_security_enabled: false
+ fixed_ips:
+ - subnet: { get_param: k8s_subnet_id }
+
+ floating_ip_k8s_net:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network_id: { get_param: public_net_id }
+ port_id: { get_resource: k8s_network_port }
+
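+ # cloud-init signals this handle once the child VMs and their vBMC endpoints
+ # are up, returning each VM's MAC address and vBMC port to the stack.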
+ wait_handle:
+ type: OS::Heat::WaitConditionHandle
+
+ wait_condition:
+ type: OS::Heat::WaitCondition
+ properties:
+ handle: { get_resource: wait_handle }
+ timeout: { get_param: boot_timeout }
+
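+ # Cloud-config for the hypervisor host: installs libvirt and virtualbmc,
+ # registers a systemd unit for vbmcd, bridges the PXE NIC into br_pxe and
+ # PXE-boots the child VMs, exposing each of them over IPMI through vbmc.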
+ server_init:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ password: 'r00tme'
+ chpasswd:
+ expire: false
+ ssh_pwauth: true
+ packages:
+ - bridge-utils
+ - cpu-checker
+ - libvirt-clients
+ - libvirt-daemon
+ - libvirt-daemon-system
+ - qemu-kvm
+ - virt-manager
+ - virtinst
+ - python3-virtualbmc
+ - ipmitool
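+       # systemd unit that runs the virtual BMC daemon (vbmcd) after libvirtd.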
+ write_files:
+ - path: /etc/systemd/system/virtualbmc.service
+ content: |
+ [Unit]
+ Description=Virtual BMC Service
+ After=network.target libvirtd.service
+ [Service]
+ Type=simple
+ ExecStart=/usr/bin/vbmcd --foreground
+ ExecStop=/bin/kill -HUP $MAINPID
+ User=root
+ Group=root
+ [Install]
+ WantedBy=multi-user.target
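+       # Netplan snippet (completed via str_replace below): creates the br_pxe
+       # bridge over the PXE interface and assigns the port's fixed IP to it.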
+ - path: /etc/netplan/99-custom-bridge.yaml
+ content:
+ str_replace:
+ template: |
+ network:
+ version: 2
+ bridges:
+ br_pxe:
+ interfaces: [ens4]
+ addresses: [ip_addr/ip_mask]
+ params:
+ ip_addr: { get_attr: [ ip_addr_pxe, value ] }
+ ip_mask: { get_attr: [ ip_mask_pxe, value ] }
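+       # Bring up the bridge, start vbmcd, create the PXE-booting child VMs,
+       # register each of them with vbmc, then report their MAC addresses and
+       # vBMC ports back through the wait condition handle.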
+ runcmd:
+ - str_replace:
+ template: |
+ #!/bin/bash
+ set +x
+ netplan apply
+ sudo ip addr flush dev ens4
+ systemctl enable --now virtualbmc.service
+ # Run instances with vBMC
+ virt-install --name child-control-1 --ram 8192 --vcpus 4 --disk size=20,bus=scsi --os-variant generic --network bridge=br_pxe,model=virtio --graphics vnc,listen=0.0.0.0 --boot network --pxe --noautoconsole
+ virt-install --name child-control-2 --ram 8192 --vcpus 4 --disk size=20,bus=scsi --os-variant generic --network bridge=br_pxe,model=virtio --graphics vnc,listen=0.0.0.0 --boot network --pxe --noautoconsole
+ virt-install --name child-control-3 --ram 8192 --vcpus 4 --disk size=20,bus=scsi --os-variant generic --network bridge=br_pxe,model=virtio --graphics vnc,listen=0.0.0.0 --boot network --pxe --noautoconsole
+ vbmc add child-control-1 --port 6231 --username engineer --password password && vbmc start child-control-1
+ vbmc add child-control-2 --port 6232 --username engineer --password password && vbmc start child-control-2
+ vbmc add child-control-3 --port 6233 --username engineer --password password && vbmc start child-control-3
+ # Collect VM data
+ mac1=$(virsh domiflist child-control-1 | grep 'br_pxe' | awk '{print $5}')
+ mac2=$(virsh domiflist child-control-2 | grep 'br_pxe' | awk '{print $5}')
+ mac3=$(virsh domiflist child-control-3 | grep 'br_pxe' | awk '{print $5}')
+ # Simple success signal
+ wc_notify --data-binary '{"status": "SUCCESS", "data": {"vnodes": [{"mac": "'${mac1}'", "port": "6231"}, {"mac": "'${mac2}'", "port": "6232"}, {"mac": "'${mac3}'", "port": "6233"}]}}'
+ params:
+ wc_notify: { get_attr: [ wait_handle, curl_cli ] }
+
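+ # Hypervisor host: first NIC on the k8s network (floating IP attached), second
+ # NIC on the PXE network, which cloud-init bridges into br_pxe for the VMs.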
+ server:
+ type: OS::Nova::Server
+ properties:
+ availability_zone: { get_param: availability_zone }
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ key_name: { get_param: key_name }
+ networks:
+ - port: { get_resource: k8s_network_port }
+ - port: { get_resource: pxe_network_port }
+ user_data_format: RAW
+ user_data: { get_resource: server_init }
+
+outputs:
+ server_public_ip:
+ description: Floating IP address of server in public network
+ value: { get_attr: [ floating_ip_k8s_net, floating_ip_address ] }
+ vbmc_ip:
+ description: IP address of interface in PXE network (virtual BMC)
+ value: { get_attr: [ ip_addr_pxe, value ] }
+ vnodes_data:
+ description: Virtual nodes data (mac addresses and vbmc ports)
+ value: { get_attr: [ wait_condition, data ] }