# Heat environment file: maps MCP2 custom resource types to their template
# fragments and supplies deployment parameters for a Ceph-enabled cluster.
resource_registry:
  "MCP2::NetworkAcc": ../fragments/NetworkAccVM.yaml
  "MCP2::NetworkAccStorage": ../fragments/NetworkAccVMStorage.yaml
  "MCP2::NetworkPrvFl": ../fragments/NetworkPrvFl.yaml
  "MCP2::NetworkIronicFlat": ../fragments/NetworkIronicFlat.yaml
  "MCP2::SrvInstances": ../fragments/SrvInstancesVM.yaml
  "MCP2::SrvInstancesCeph": ../fragments/SrvInstancesVMCeph.yaml
  "MCP2::SrvInstancesCephOSD": ../fragments/SrvInstancesVMCephOSD.yaml

parameters:
  image: bionic-server-cloudimg-amd64-20190612
  public_net_id: public
  masters_size: 0
  worker_size: 5
  cmp_size: 2
  gtw_size: 0
  lma_size: 0
  osd_size: 0
  vbmc_size: 3
  ucp_boot_timeout: 3600
  cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
  # NOTE(vsaienko): in MCP1 ips for nodes allocated closer to range beginning 10.13.0.0-10.13.102.0
  # floating network pool is 10.13.128.0-10.13.255.254
  private_floating_network_cidr: '10.13.0.0/16'
  private_floating_network_ipam_pool_start: '10.13.126.0'
  private_floating_network_ipam_pool_end: '10.13.126.255'
  private_floating_network_pool_start: '10.13.127.0'
  private_floating_network_pool_end: '10.13.127.255'
  ironic_baremetal_network_cidr: '10.14.0.0/24'
  ironic_baremetal_network_ipam_pool_start: '10.14.0.2'
  ironic_baremetal_network_ipam_pool_end: '10.14.0.99'
  ironic_baremetal_network_pool_start: '10.14.0.100'
  ironic_baremetal_network_pool_end: '10.14.0.200'
  control_network_cidr: '10.9.10.0/24'
  private_floating_interface: 'ens4'
  tunnel_interface: 'ens3'
  worker_metadata: {"labels": {"openstack-control-plane":"enabled","openvswitch":"enabled","openstack-gateway":"enabled","role":"ceph-osd-node","local-volume-provisioner": "enabled"}}
  cmp_metadata: {"labels": {"openstack-compute-node":"enabled","openvswitch":"enabled", "role":"ceph-osd-node"}}
  gtw_metadata: {"labels": {"openvswitch":"enabled"}}
  vbmc_metadata: {"labels": {"virtualbmc":"enabled"}}
  # hardware_metadata which is used for Ceph requires flavor with
  # ephemeral storage because it is used for Ceph bluestore.
  workers_flavor: 'system.compact.openstack.control.ephemeral'
  cmps_flavor: 'system.compact.openstack.control.ephemeral'
  vbmcs_flavor: 'system.compact.openstack.control'
  storage_frontend_network_cidr: '10.12.1.0/24'
  storage_backend_network_cidr: '10.12.0.0/24'
  # Per-node Ceph metadata keyed by MAC address; consumed as an opaque
  # text blob (block scalar) by the deployment tooling.
  hardware_metadata: |
    '00:00:00:00:00:00':
      write_files:
        - path: /usr/share/metadata/ceph.yaml
          content: |
            storageDevices:
              - name: vdb
                role: hdd
                sizeGb: 20
            ramGb: 8
            cores: 2
            # The roles will be assigned based on node labels.
            # roles:
            #   - mon
            #   - mgr
            ips:
              - 192.168.122.101
            crushPath: {}