# blob: 19e145af0cb2e1a183c711255b5874e060a98835
resource_registry:
"MCP2::NetworkAcc": ../fragments/NetworkAccBM.yaml
"MCP2::NetworkPrvFl": ../fragments/NetworkPrvFl.yaml
"MCP2::SrvInstances": ../fragments/SrvInstancesBM.yaml
"MCP2::NetworkAccStorage": ../fragments/NetworkAccVMStorage.yaml
"MCP2::SrvInstancesCeph": ../fragments/SrvInstancesBMCeph.yaml
"MCP2::SrvInstancesCephOSD": ../fragments/SrvInstancesBMCephOSD.yaml
parameters:
image: bionic-server-cloudimg-amd64-20190612
ucp_flavor: oscore-bm-lab-01
masters_flavor: oscore-bm-lab-01
workers_flavor: oscore-bm-lab-01
cmps_flavor: oscore-bm-lab-01
gtws_flavor: oscore-bm-lab-01
lmas_flavor: oscore-bm-lab-01
osds_flavor: oscore-bm-lab-01
public_net_id: 'physnet1-402'
control_network_cidr: '172.16.43.0/24'
masters_size: 0
worker_size: 3
cmp_size: 2
gtw_size: 0
lma_size: 0
osd_size: 0
ucp_boot_timeout: 3600
cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
private_floating_network_cidr: '172.16.49.128/26'
private_floating_network_ipam_pool_start: '172.16.49.130'
private_floating_network_ipam_pool_end: '172.16.49.139'
private_floating_network_pool_start: '172.16.49.140'
private_floating_network_pool_end: '172.16.49.160'
private_floating_network_gateway: '172.16.49.129'
metallb_address_pools: [ '172.16.49.161-172.16.49.190' ]
private_floating_interface: 'enp3s0f1.403'
storage_frontend_interface: 'enp3s0f1.404'
storage_backend_interface: 'enp3s0f1.405'
worker_metadata: {"labels": {"openstack-control-plane":"enabled","openvswitch":"enabled","openstack-gateway":"enabled","role":"ceph-osd-node","local-volume-provisioner": "enabled"}}
cmp_metadata: {"labels": {"openstack-compute-node":"enabled","openvswitch":"enabled", "role":"ceph-osd-node"}}
gtw_metadata: {"labels": {"openvswitch":"enabled"}}
tunnel_interface: 'enp3s0f0'
live_migration_interface: 'enp3s0f0'
  # NOTE(review): frontend and backend storage networks share the same CIDR
  # while their interfaces are on different VLANs (404 vs 405) — confirm this
  # overlap is intentional and not a copy-paste oversight.
  storage_frontend_network_cidr: '10.12.0.0/24'
  storage_backend_network_cidr: '10.12.0.0/24'
functions_override: |
function network_config {
PUBLIC_NODE_IP_ADDRESS=${PUBLIC_INTERFACE_IP:-$(ip addr show dev ${PUBLIC_INTERFACE} | grep -Po 'inet \K[\d.]+' | egrep -v "127.0.|172.17")}
PUBLIC_NODE_IP_NETMASK=${PUBLIC_INTERFACE_NETMASK:-$(ip addr show dev ${PUBLIC_INTERFACE} | grep -Po 'inet \K[\d.]+\/[\d]+' | egrep -v "127.0.|172.17" | cut -d'/' -f2)}
local cloud_netplan_cfg="/etc/netplan/50-cloud-init.yaml"
local match_ip_line
local public_if_mac
DEBIAN_FRONTEND=noninteractive apt -y install bridge-utils
cat << EOF > /etc/systemd/network/10-veth-phy-br.netdev
[NetDev]
Name=veth-phy
Kind=veth
[Peer]
Name=veth-br
EOF
public_if_mac=$(ip link show $(echo ${PUBLIC_INTERFACE} | cut -d'.' -f1) | grep 'link/ether' | awk '{print $2}')
sed -i 's/.*ethernets:.*/&\n veth-phy: {}/' ${cloud_netplan_cfg}
sed -i 's/.*ethernets:.*/&\n veth-br: {}/' ${cloud_netplan_cfg}
sed -i "s/.*ethernets:.*/&\n $(echo ${PUBLIC_INTERFACE} | cut -d'.' -f1):\n dhcp4: false\n match:\n macaddress: ${public_if_mac}\n set-name: $(echo ${PUBLIC_INTERFACE} | cut -d'.' -f1)/" ${cloud_netplan_cfg}
if [ $STORAGE_FRONTEND_INTERFACE ]; then
STORAGE_FRONTEND_INTERFACE_DEF="
${STORAGE_FRONTEND_INTERFACE}:
id: 404
link: $(echo ${STORAGE_FRONTEND_INTERFACE} | cut -d'.' -f1)
addresses: [ "${STORAGE_FRONTEND_INTERFACE_IP}/${STORAGE_FRONTEND_NETWORK_NETMASK}" ]
"
fi
cat << EOF >> ${cloud_netplan_cfg}
vlans:
${PUBLIC_INTERFACE}:
id: 403
link: $(echo ${PUBLIC_INTERFACE} | cut -d'.' -f1)
${STORAGE_FRONTEND_INTERFACE_DEF}
bridges:
br-public:
dhcp4: false
interfaces:
- ${PUBLIC_INTERFACE}
- veth-br
addresses: [ "${PUBLIC_NODE_IP_ADDRESS}/${PUBLIC_NODE_IP_NETMASK}" ]
EOF
netplan --debug apply
# NOTE(vsaienko): give some time to apply changes
sleep 15
}
hardware_metadata: |
'0c:c4:7a:20:19:8a':
write_files:
- path: /usr/share/metadata/ceph.yaml
content: |
storageDevices:
- name: sdb
role: hdd
sizeGb: 450
ramGb: 64
cores: 12
# The roles will be assigned based on node labels.
# roles:
# - mon
# - mgr
ips:
- 192.168.122.101
crushPath: {}
'0c:c4:7a:20:19:28':
write_files:
- path: /usr/share/metadata/ceph.yaml
content: |
storageDevices:
- name: sdb
role: hdd
sizeGb: 450
ramGb: 64
cores: 12
ips:
- 192.168.122.102
crushPath: {}
'0c:c4:7a:20:19:48':
write_files:
- path: /usr/share/metadata/ceph.yaml
content: |
storageDevices:
- name: sdb
role: hdd
sizeGb: 450
ramGb: 64
cores: 12
ips:
- 192.168.122.103
crushPath: {}
'0c:c4:7a:20:15:28':
write_files:
- path: /usr/share/metadata/ceph.yaml
content: |
storageDevices:
- name: sdb
role: hdd
sizeGb: 450
ramGb: 64
cores: 12
ips:
- 192.168.122.104
crushPath: {}
'0c:c4:7a:20:19:46':
write_files:
- path: /usr/share/metadata/ceph.yaml
content: |
storageDevices:
- name: sdb
role: hdd
sizeGb: 450
ramGb: 64
cores: 12
ips:
- 192.168.122.105
crushPath: {}
'0c:c4:7a:17:90:66':
write_files:
- path: /usr/share/metadata/ceph.yaml
content: |
storageDevices:
- name: sdb
role: hdd
sizeGb: 450
ramGb: 64
cores: 12
ips:
- 192.168.122.106
crushPath: {}