Add AIO with k0s
Related-Prod: PRODX-46480
Change-Id: I824d424d524213f56fdfe43dd27710f306e6ecb5
diff --git a/de/heat-templates/env/k0s-aio.yaml b/de/heat-templates/env/k0s-aio.yaml
new file mode 100644
index 0000000..ab65e9e
--- /dev/null
+++ b/de/heat-templates/env/k0s-aio.yaml
@@ -0,0 +1,56 @@
+resource_registry:
+ "MCP2::NetworkAcc": ../fragments/NetworkAccVM.yaml
+ "MCP2::NetworkAccStorage": ../fragments/NetworkAccVMStorage.yaml
+ "MCP2::NetworkPrvFl": ../fragments/NetworkPrvFl.yaml
+ "MCP2::NetworkIronicFlat": ../fragments/NetworkIronicFlat.yaml
+ "MCP2::SrvInstances": ../fragments/SrvInstancesVM.yaml
+ "MCP2::SrvInstancesCeph": ../fragments/SrvInstancesVMCeph.yaml
+ "MCP2::SrvInstancesCephOSD": ../fragments/SrvInstancesVMCephOSD.yaml
+ "MCP2::NetworkTun": ../fragments/NetworkTun.yaml
+
+parameters:
+ image: bionic-server-cloudimg-amd64-20190612
+ public_net_id: public
+ masters_size: 0
+ worker_size: 0
+ cmp_size: 0
+ gtw_size: 0
+ lma_size: 0
+ osd_size: 0
+ tsrv_size: 0
+ ucp_boot_timeout: 3600
+ cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
+ private_floating_network_cidr: '10.11.12.0/24'
+ private_floating_interface: 'ens4'
+ tunnel_interface: 'ens8'
+ ucp_metadata: {"labels": {"openstack-control-plane":"enabled","openstack-compute-node":"enabled","openvswitch":"enabled", "openstack-gateway":"enabled","role":"ceph-osd-node","local-volume-provisioner": "enabled"}}
+ # compact.cid: RAM 32768 | Disk 100 | VCPU 8
+ ucp_flavor: 'mosk.aio.ephemeral'
+ cmp_metadata: {"labels": {"openstack-compute-node":"enabled","openvswitch":"enabled", "role":"ceph-osd-node"}}
+ gtw_metadata: {"labels": {"openvswitch":"enabled"}}
+ # hardware_metadata which is used for Ceph requires flavor with
+ # ephemeral storage because it is used for Ceph bluestore.
+ workers_flavor: 'system.compact.openstack.control.ephemeral'
+ cmps_flavor: 'mosk.s.compute.ephemeral'
+ storage_frontend_network_cidr: '10.12.1.0/24'
+ storage_backend_network_cidr: '10.12.0.0/24'
+ kubernetes_installer: k0s
+ single_node: 'true'
+ hardware_metadata: |
+ '00:00:00:00:00:00':
+ write_files:
+ - path: /usr/share/metadata/ceph.yaml
+ content: |
+ storageDevices:
+ - name: vdb
+ role: hdd
+ sizeGb: 20
+ ramGb: 8
+ cores: 2
+ # The roles will be assigned based on node labels.
+ # roles:
+ # - mon
+ # - mgr
+ ips:
+ - 192.168.122.101
+ crushPath: {}
diff --git a/de/heat-templates/scripts/functions.sh b/de/heat-templates/scripts/functions.sh
index 3f838b6..1283894 100644
--- a/de/heat-templates/scripts/functions.sh
+++ b/de/heat-templates/scripts/functions.sh
@@ -359,9 +359,13 @@
}
function install_k0s {
+ local install_flags="--enable-worker"
+ if [[ "${SINGLE_NODE}" == true ]]; then
+ install_flags="$install_flags --single"
+ fi
mkdir -p /etc/k0s
k0s config create > /etc/k0s/k0s.yaml
- k0s install controller -c /etc/k0s/k0s.yaml
+ k0s install controller -c /etc/k0s/k0s.yaml $install_flags
k0s start
sleep 60
@@ -372,8 +376,10 @@
mkdir -p /etc/k0s
k0s kubeconfig admin > /etc/k0s/admin.conf
- k0s token create --role=worker > /etc/k0s/worker_token.yaml
- k0s token create --role=controller > /etc/k0s/controller_token.yaml
+ if [[ "${SINGLE_NODE}" != true ]]; then
+ k0s token create --role=worker > /etc/k0s/worker_token.yaml
+ k0s token create --role=controller > /etc/k0s/controller_token.yaml
+ fi
install_pkgs nginx
rm -f /etc/nginx/sites-enabled/default
diff --git a/de/heat-templates/scripts/launch.sh b/de/heat-templates/scripts/launch.sh
index e5392ac..53148b3 100644
--- a/de/heat-templates/scripts/launch.sh
+++ b/de/heat-templates/scripts/launch.sh
@@ -152,16 +152,18 @@
download_k0s
install_k0s
fi
+ wait_for_node
+ set_node_labels
+ collect_ceph_metadata
+ configure_contrack
if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
- wait_for_node
- set_node_labels
- collect_ceph_metadata
- configure_contrack
disable_iptables_for_bridges
fi
if [[ "${SINGLE_NODE}" == true ]]; then
nested_virt_config
- disable_master_taint
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ disable_master_taint
+ fi
collect_interfaces_metadata
fi
cron_disable_calico_offloading