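# Heat environment for a virtual MOSK (MCP2) deployment: the resource_registry
# maps the MCP2::* resource types to local template fragments, and the
# parameters below size the cluster and configure its networks and Ceph
# storage. Example usage (hypothetical template and file names):
#   openstack stack create -t ../top.yaml -e this-env.yaml mosk-cluster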
resource_registry:
  "MCP2::NetworkAcc": ../fragments/NetworkAccVM.yaml
  "MCP2::NetworkAccStorage": ../fragments/NetworkAccVMStorage.yaml
  "MCP2::NetworkPrvFl": ../fragments/NetworkPrvFl.yaml
  "MCP2::NetworkIronicFlat": ../fragments/NetworkIronicFlat.yaml
  "MCP2::NetworkExtSrv": ../fragments/NetworkExtSrv.yaml
  "MCP2::SrvInstances": ../fragments/SrvInstancesVM.yaml
  "MCP2::SrvInstancesCeph": ../fragments/SrvInstancesVMCeph.yaml
  "MCP2::SrvInstancesCephOSD": ../fragments/SrvInstancesVMCephOSD.yaml
  "MCP2::NetworkTun": ../fragments/NetworkTun.yaml

parameters:
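  # Guest image, external (floating) network, and node counts per role;
  # a *_size of 0 presumably disables that node group.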
  image: bionic-server-cloudimg-amd64-20190612
  public_net_id: public
  masters_size: 0
  worker_size: 3
  cmp_size: 5
  gtw_size: 0
  lma_size: 0
  osd_size: 0
  ucp_boot_timeout: 3600
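  # ucp_boot_timeout above is presumably in seconds; the public key below is
  # injected into the cluster nodes for SSH access.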
  cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
  private_floating_network_cidr: '10.11.12.0/24'
  private_floating_network_gateway: '10.11.12.1'
  private_floating_interface: 'ens4'
  tunnel_interface: 'ens8'
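  # Per-role node metadata; judging by the label names, the "labels" maps are
  # applied as Kubernetes node labels that steer where OpenStack components
  # are scheduled.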
  worker_metadata: {"labels": {"openstack-control-plane": "enabled", "openvswitch": "enabled", "openstack-gateway": "enabled", "local-volume-provisioner": "enabled"}}
  cmp_metadata: {"labels": {"openstack-compute-node": "enabled", "openvswitch": "enabled", "role": "ceph-osd-node"}}
  gtw_metadata: {"labels": {"openvswitch": "enabled"}}
  # hardware_metadata, which is used for Ceph, requires a flavor with
  # ephemeral storage because the ephemeral disk backs the Ceph BlueStore.
  workers_flavor: 'mosk.l.control.ephemeral'
  cmps_flavor: 'mosk.s.compute.ephemeral'
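  # Storage networks; frontend presumably maps to the Ceph public network and
  # backend to the Ceph cluster (replication) network.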
  storage_frontend_network_cidr: '10.12.1.0/24'
  storage_frontend_network_gateway: '10.12.1.1'
  storage_frontend_network_ipam_pool_start: '10.12.1.3'
  storage_frontend_network_ipam_pool_end: '10.12.1.254'
  storage_backend_network_cidr: '10.12.0.0/24'
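  # Loop device sizes, presumably in GB, backing LVM on the compute nodes
  # (the second one for the Cinder LVM backend).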
  cmp_lvm_loop_device_size: 50
  cmp_cinder_lvm_loop_device_size: 50
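  # Per-machine Ceph inventory keyed by MAC address; '00:00:00:00:00:00'
  # appears to act as a placeholder entry. vdb is presumably the flavor's
  # ephemeral disk, used here as a 20 GB OSD.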
  hardware_metadata: |
    '00:00:00:00:00:00':
      write_files:
        - path: /usr/share/metadata/ceph.yaml
          content: |
            storageDevices:
            - name: vdb
              role: hdd
              sizeGb: 20
            ramGb: 8
            cores: 2
            # The roles will be assigned based on node labels.
            # roles:
            # - mon
            # - mgr
            ips:
            - 192.168.122.101
            crushPath: {}