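# Heat environment file for a single-node (all-in-one) MCP2/MOSK child
# cluster. The resource_registry below maps the custom MCP2::* resource
# types used by the top-level template onto reusable template fragments;
# the relative ../fragments paths assume this file lives in a sibling
# directory of fragments/.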
resource_registry:
  "MCP2::NetworkAcc": ../fragments/NetworkAccVM.yaml
  "MCP2::NetworkAccStorage": ../fragments/NetworkAccVMStorage.yaml
  "MCP2::NetworkPrvFl": ../fragments/NetworkPrvFl.yaml
  "MCP2::NetworkIronicFlat": ../fragments/NetworkIronicFlat.yaml
  "MCP2::SrvInstances": ../fragments/SrvInstancesVM.yaml
  "MCP2::SrvInstancesCeph": ../fragments/SrvInstancesVMCeph.yaml
  "MCP2::SrvInstancesCephOSD": ../fragments/SrvInstancesVMCephOSD.yaml
  "MCP2::NetworkTun": ../fragments/NetworkTun.yaml
parameters:
  image: bionic-server-cloudimg-amd64-20190612
  public_net_id: public
  masters_size: 0
  worker_size: 0
  cmp_size: 0
  gtw_size: 0
  lma_size: 0
  osd_size: 0
  tsrv_size: 0
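  # All of the per-role node counts above are zero: with single_node
  # enabled below, the control-plane, compute, gateway, LMA, and Ceph
  # OSD roles are collapsed onto the single UCP node (see ucp_metadata).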
  ucp_boot_timeout: 3600
  cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
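  # The public key above is a sample checked into the repository;
  # presumably it should be replaced with your own key so you can SSH
  # into the spawned VMs.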
  private_floating_network_cidr: '10.11.12.0/24'
  private_floating_network_gateway: '10.11.12.1'
  private_floating_interface: 'ens4'
  tunnel_interface: 'ens3'
  ucp_metadata: {"labels": {"openstack-control-plane":"enabled","openstack-compute-node":"enabled","openvswitch":"enabled","openstack-gateway":"enabled","role":"ceph-osd-node","local-volume-provisioner":"enabled"}}
  # compact.cid: RAM 32768 | Disk 100 | VCPU 8
  ucp_flavor: 'mosk.aio.s.ephemeral'
  cmp_metadata: {"labels": {"openstack-compute-node":"enabled","openvswitch":"enabled","role":"ceph-osd-node"}}
  gtw_metadata: {"labels": {"openvswitch":"enabled"}}
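  # The *_metadata maps above end up as Kubernetes node labels on the
  # corresponding nodes; judging by the commented-out "roles" list in
  # hardware_metadata below, these labels are what drive role placement
  # (OpenStack control plane, compute, gateway, Ceph OSD).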
  # hardware_metadata, which is used for Ceph, requires a flavor with
  # ephemeral storage, because the ephemeral disk is consumed by the
  # Ceph BlueStore backend.
  workers_flavor: 'system.compact.openstack.control.ephemeral'
  cmps_flavor: 'mosk.s.compute.ephemeral'
  storage_frontend_network_cidr: '10.12.1.0/24'
  storage_frontend_network_gateway: '10.12.1.1'
  storage_backend_network_cidr: '10.12.0.0/24'
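  # Assumption: the storage frontend network carries Ceph public
  # (client) traffic and the storage backend network carries Ceph
  # replication traffic, matching the usual Ceph public/cluster split.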
  kubernetes_installer: k0s
  single_node: 'true'
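  # hardware_metadata below is keyed by node MAC address; its value is a
  # cloud-init-style write_files payload that drops a ceph.yaml onto the
  # matching node, describing the disks Ceph may claim on that node.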
  hardware_metadata: |
    '00:00:00:00:00:00':
      write_files:
        - path: /usr/share/metadata/ceph.yaml
          content: |
            storageDevices:
              - name: vdb
                role: hdd
                sizeGb: 20
            ramGb: 8
            cores: 2
            # The roles will be assigned based on node labels.
            # roles:
            # - mon
            # - mgr
            ips:
              - 192.168.122.101
            crushPath: {}
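  # A minimal sketch of how a second node could be described (the MAC
  # address and IP below are placeholders, not values from this repo):
  #   '00:00:00:00:00:01':
  #     write_files:
  #       - path: /usr/share/metadata/ceph.yaml
  #         content: |
  #           storageDevices:
  #             - name: vdb
  #               role: hdd
  #               sizeGb: 20
  #           ips:
  #             - 192.168.122.102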