---
# Maps custom resource types (MCP2::*, vMX components) to local Heat
# template fragments, relative to this environment file.
resource_registry:
  "MCP2::NetworkAcc": ../fragments/NetworkAccVM.yaml
  "MCP2::NetworkAccStorage": ../fragments/NetworkAccVMStorage.yaml
  "MCP2::NetworkPrvFl": ../fragments/NetworkPrvFl.yaml
  "MCP2::NetworkIronicFlat": ../fragments/NetworkIronicFlat.yaml
  "MCP2::NetworkTun": ../fragments/NetworkTun.yaml
  "MCP2::SrvInstances": ../fragments/SrvInstancesVM.yaml
  "MCP2::SrvInstancesCeph": ../fragments/SrvInstancesVMCeph.yaml
  "MCP2::SrvInstancesCephOSD": ../fragments/SrvInstancesVMCephOSD.yaml
  "MCP2::VMX": ../fragments/vMX.yaml
  "OS::Nova::VmxRe": ../fragments/vmx-components/vms/re.yaml
  "OS::Nova::VmxFpcSingle": ../fragments/vmx-components/vms/fpc_no_metadata.yaml
  "OS::Networking::VmxFpcFixedNet": ../fragments/vmx-components/vms/fpc_fixed_net.yaml
  "OS::Networking::VmxInternalNet": ../fragments/vmx-components/bridges/bridges_internal.yaml
  "OS::Networking::VmxPort": ../fragments/vmx-components/ports/port.yaml
  "OS::Networking::VmxFpcEvpnNet": ../fragments/vmx-components/network/evpn_network.yaml
# Input parameters consumed by the templates registered above.
# NOTE(review): all nesting below was reconstructed — the original paste had
# lost its indentation, which made every key top-level and left the
# hardware_metadata block scalar empty. Verify against the upstream template.
parameters:
  # vmx parameters (vFP = forwarding plane, vCP/JunOS RE = control plane)
  vmx_linux_img: vPFC-20170216
  vmx_linux_flav: vfp.lite
  vmx_junos_img: vmx-re-x86-64-17.1R1.8
  vmx_junos_flav: vcp.lite
  vmx_gateway_ip: 10.10.0.1
  image: bionic-server-cloudimg-amd64-20190612
  public_net_id: public
  # Node counts per role
  masters_size: 0
  worker_size: 3
  cmp_size: 2
  gtw_size: 0
  lma_size: 0
  osd_size: 0
  ntw_size: 3
  ucp_boot_timeout: 3600
  cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
  tungstenfabric_enabled: true
  vmx_enabled: true
  metallb_address_pools: '10.11.12.201-10.11.12.254'
  private_floating_network_cidr: '10.11.12.0/24'
  private_floating_network_ipam_pool_start: '10.11.12.3'
  private_floating_network_ipam_pool_end: '10.11.12.70'
  private_floating_network_gateway: '10.11.12.1'
  control_network_ext_router_ip: '10.10.0.131'
  tun_network_ext_router_ip: '10.15.0.131'
  evpn_network_vmx_ip: '10.20.100.100'
  private_floating_interface: 'ens4'
  tunnel_interface: 'ens8'
  # Kubernetes node labels per role (block style; values unchanged).
  worker_metadata:
    labels:
      openstack-control-plane: 'enabled'
      local-volume-provisioner: 'enabled'
  cmp_metadata:
    labels:
      openstack-compute-node: 'enabled'
      tfvrouter: 'enabled'
      role: 'ceph-osd-node'
      openstack-gateway: 'enabled'
  ntw_metadata:
    labels:
      tfconfig: 'enabled'
      tfconfigdb: 'enabled'
      tfcontrol: 'enabled'
      tfanalytics: 'enabled'
      tfanalyticsdb: 'enabled'
      tfwebui: 'enabled'
      local-volume-provisioner: 'enabled'
  # hardware_metadata which is used for Ceph requires flavor with
  # ephemeral storage because it is used for Ceph bluestore.
  workers_flavor: 'system.compact.openstack.control.ephemeral'
  cmps_flavor: 'mosk.s.compute.ephemeral'
  storage_frontend_network_cidr: '10.12.1.0/24'
  storage_frontend_network_gateway: '10.12.1.1'
  storage_frontend_network_ipam_pool_start: '10.12.1.3'
  storage_frontend_network_ipam_pool_end: '10.12.1.254'
  storage_backend_network_cidr: '10.12.0.0/24'
  # Literal block scalar: passed through as text and parsed later by the
  # consumer, keyed by node MAC address.
  # NOTE(review): inner relative indentation reconstructed — confirm upstream.
  hardware_metadata: |
    '00:00:00:00:00:00':
      write_files:
        - path: /usr/share/metadata/ceph.yaml
          content: |
            storageDevices:
              - name: vdb
                role: hdd
                sizeGb: 20
            ramGb: 8
            cores: 2
            # The roles will be assigned based on node labels.
            # roles:
            # - mon
            # - mgr
            ips:
              - 192.168.122.101
            crushPath: {}