Allow deploying Kubernetes with k0s
Related-Prod: PRODX-3456
Change-Id: I82c489a228c1cd137a6b679d8e764a61ca60e5c6
diff --git a/de/heat-templates/env/k0s-mstr1-wrkr3-cmp0-gtw0.yaml b/de/heat-templates/env/k0s-mstr1-wrkr3-cmp0-gtw0.yaml
new file mode 100644
index 0000000..3804aae
--- /dev/null
+++ b/de/heat-templates/env/k0s-mstr1-wrkr3-cmp0-gtw0.yaml
@@ -0,0 +1,53 @@
+resource_registry:
+ "MCP2::NetworkAcc": ../fragments/NetworkAccVM.yaml
+ "MCP2::NetworkAccStorage": ../fragments/NetworkAccVMStorage.yaml
+ "MCP2::NetworkPrvFl": ../fragments/NetworkPrvFl.yaml
+ "MCP2::NetworkIronicFlat": ../fragments/NetworkIronicFlat.yaml
+ "MCP2::SrvInstances": ../fragments/SrvInstancesVM.yaml
+ "MCP2::SrvInstancesCeph": ../fragments/SrvInstancesVMCeph.yaml
+ "MCP2::SrvInstancesCephOSD": ../fragments/SrvInstancesVMCephOSD.yaml
+ "MCP2::NetworkTun": ../fragments/NetworkTun.yaml
+
+parameters:
+ image: bionic-server-cloudimg-amd64-20190612
+ public_net_id: public
+ masters_size: 0
+ worker_size: 3
+ cmp_size: 0
+ gtw_size: 0
+ lma_size: 0
+ osd_size: 0
+ spare_size: 0
+ ucp_boot_timeout: 3600
+ cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
+ private_floating_network_cidr: '10.11.12.0/24'
+ private_floating_interface: 'ens4'
+ tunnel_interface: 'ens8'
+ worker_metadata: {"labels": {"openstack-control-plane":"enabled","openstack-compute-node":"enabled","openvswitch":"enabled", "openstack-gateway":"enabled","role":"ceph-osd-node","local-volume-provisioner": "enabled"}}
+ cmp_metadata: {"labels": {"openstack-compute-node":"enabled","openvswitch":"enabled", "role":"ceph-osd-node"}}
+ gtw_metadata: {"labels": {"openvswitch":"enabled"}}
+ # hardware_metadata which is used for Ceph requires flavor with
+ # ephemeral storage because it is used for Ceph bluestore.
+ workers_flavor: 'system.compact.openstack.control.ephemeral'
+ cmps_flavor: 'system.compact.openstack.control.ephemeral'
+ storage_frontend_network_cidr: '10.12.1.0/24'
+ storage_backend_network_cidr: '10.12.0.0/24'
+ kubernetes_installer: k0s
+ hardware_metadata: |
+ '00:00:00:00:00:00':
+ write_files:
+ - path: /usr/share/metadata/ceph.yaml
+ content: |
+ storageDevices:
+ - name: vdb
+ role: hdd
+ sizeGb: 20
+ ramGb: 8
+ cores: 2
+ # The roles will be assigned based on node labels.
+ # roles:
+ # - mon
+ # - mgr
+ ips:
+ - 192.168.122.101
+ crushPath: {}
diff --git a/de/heat-templates/env/mstr1-wrkr3-cmp0-gtw0.yaml b/de/heat-templates/env/mstr1-wrkr3-cmp0-gtw0.yaml
index f5c79f1..8176416 100644
--- a/de/heat-templates/env/mstr1-wrkr3-cmp0-gtw0.yaml
+++ b/de/heat-templates/env/mstr1-wrkr3-cmp0-gtw0.yaml
@@ -17,6 +17,7 @@
gtw_size: 0
lma_size: 0
osd_size: 0
+ spare_size: 0
ucp_boot_timeout: 3600
cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
private_floating_network_cidr: '10.11.12.0/24'
diff --git a/de/heat-templates/fragments/SrvInstancesBM.yaml b/de/heat-templates/fragments/SrvInstancesBM.yaml
index 4494194..8efb0fd 100644
--- a/de/heat-templates/fragments/SrvInstancesBM.yaml
+++ b/de/heat-templates/fragments/SrvInstancesBM.yaml
@@ -7,6 +7,8 @@
default: {}
node_type:
type: string
+ kubernetes_installer:
+ type: string
key_name:
type: string
description: Name of keypair to assign to servers
@@ -66,6 +68,7 @@
template: { get_file: ../scripts/instance_boot.sh }
params:
$node_type: { get_param: node_type }
+ $kubernetes_installer: { get_param: kubernetes_installer }
$wait_condition_notify: { get_attr: [ wait_handle, curl_cli ] }
$docker_ee_url: { get_param: docker_ee_url }
$docker_ee_release: { get_param: docker_ee_release }
diff --git a/de/heat-templates/fragments/SrvInstancesBMCeph.yaml b/de/heat-templates/fragments/SrvInstancesBMCeph.yaml
index 3a7b32f..f3d4b7a 100644
--- a/de/heat-templates/fragments/SrvInstancesBMCeph.yaml
+++ b/de/heat-templates/fragments/SrvInstancesBMCeph.yaml
@@ -7,6 +7,8 @@
default: {}
node_type:
type: string
+ kubernetes_installer:
+ type: string
key_name:
type: string
description: Name of keypair to assign to servers
@@ -83,6 +85,7 @@
template: { get_file: ../scripts/instance_boot.sh }
params:
$node_type: { get_param: node_type }
+ $kubernetes_installer: { get_param: kubernetes_installer }
$wait_condition_notify: { get_attr: [ wait_handle, curl_cli ] }
$docker_ee_url: { get_param: docker_ee_url }
$docker_ee_release: { get_param: docker_ee_release }
diff --git a/de/heat-templates/fragments/SrvInstancesBMCephOSD.yaml b/de/heat-templates/fragments/SrvInstancesBMCephOSD.yaml
index 1d4b2da..ba089d2 100644
--- a/de/heat-templates/fragments/SrvInstancesBMCephOSD.yaml
+++ b/de/heat-templates/fragments/SrvInstancesBMCephOSD.yaml
@@ -7,6 +7,8 @@
default: {}
node_type:
type: string
+ kubernetes_installer:
+ type: string
key_name:
type: string
description: Name of keypair to assign to servers
@@ -101,6 +103,7 @@
template: { get_file: ../scripts/instance_boot.sh }
params:
$node_type: { get_param: node_type }
+ $kubernetes_installer: { get_param: kubernetes_installer }
$wait_condition_notify: { get_attr: [ wait_handle, curl_cli ] }
$docker_ee_url: { get_param: docker_ee_url }
$docker_ee_release: { get_param: docker_ee_release }
diff --git a/de/heat-templates/fragments/SrvInstancesVM.yaml b/de/heat-templates/fragments/SrvInstancesVM.yaml
index 253b1da..d82bd96 100644
--- a/de/heat-templates/fragments/SrvInstancesVM.yaml
+++ b/de/heat-templates/fragments/SrvInstancesVM.yaml
@@ -7,6 +7,8 @@
default: {}
node_type:
type: string
+ kubernetes_installer:
+ type: string
key_name:
type: string
description: Name of keypair to assign to servers
@@ -80,6 +82,7 @@
template: { get_file: ../scripts/instance_boot.sh }
params:
$node_type: { get_param: node_type }
+ $kubernetes_installer: { get_param: kubernetes_installer }
$wait_condition_notify: { get_attr: [ wait_handle, curl_cli ] }
$docker_ee_url: { get_param: docker_ee_url }
$docker_ee_release: { get_param: docker_ee_release }
diff --git a/de/heat-templates/fragments/SrvInstancesVMCeph.yaml b/de/heat-templates/fragments/SrvInstancesVMCeph.yaml
index c06fd18..cf88f83 100644
--- a/de/heat-templates/fragments/SrvInstancesVMCeph.yaml
+++ b/de/heat-templates/fragments/SrvInstancesVMCeph.yaml
@@ -7,6 +7,8 @@
default: {}
node_type:
type: string
+ kubernetes_installer:
+ type: string
key_name:
type: string
description: Name of keypair to assign to servers
@@ -91,6 +93,7 @@
template: { get_file: ../scripts/instance_boot.sh }
params:
$node_type: { get_param: node_type }
+ $kubernetes_installer: { get_param: kubernetes_installer }
$wait_condition_notify: { get_attr: [ wait_handle, curl_cli ] }
$docker_ee_url: { get_param: docker_ee_url }
$docker_ee_release: { get_param: docker_ee_release }
diff --git a/de/heat-templates/fragments/SrvInstancesVMCephOSD.yaml b/de/heat-templates/fragments/SrvInstancesVMCephOSD.yaml
index a862bdd..227f299 100644
--- a/de/heat-templates/fragments/SrvInstancesVMCephOSD.yaml
+++ b/de/heat-templates/fragments/SrvInstancesVMCephOSD.yaml
@@ -7,6 +7,8 @@
default: {}
node_type:
type: string
+ kubernetes_installer:
+ type: string
key_name:
type: string
description: Name of keypair to assign to servers
@@ -121,6 +123,7 @@
template: { get_file: ../scripts/instance_boot.sh }
params:
$node_type: { get_param: node_type }
+ $kubernetes_installer: { get_param: kubernetes_installer }
$wait_condition_notify: { get_attr: [ wait_handle, curl_cli ] }
$docker_ee_url: { get_param: docker_ee_url }
$docker_ee_release: { get_param: docker_ee_release }
diff --git a/de/heat-templates/fragments/multirack/CentralSite.yaml b/de/heat-templates/fragments/multirack/CentralSite.yaml
index d421a1f..e5f9c1e 100644
--- a/de/heat-templates/fragments/multirack/CentralSite.yaml
+++ b/de/heat-templates/fragments/multirack/CentralSite.yaml
@@ -80,6 +80,8 @@
type: string
default_interface:
type: string
+ kubernetes_installer:
+ type: string
resources:
router:
@@ -168,6 +170,7 @@
docker_ucp_image: { get_param: docker_ucp_image}
docker_default_address_pool: { get_param: docker_default_address_pool }
node_type: "ucp"
+ kubernetes_installer: { get_param: kubernetes_installer }
key_name: { get_param: key_name }
image: { get_param: image }
flavor: { get_param: ucp_flavor }
@@ -197,6 +200,7 @@
docker_ucp_image: { get_param: docker_ucp_image}
docker_default_address_pool: { get_param: docker_default_address_pool }
node_type: "worker"
+ kubernetes_installer: { get_param: kubernetes_installer }
key_name: { get_param: key_name }
image: { get_param: image }
flavor: { get_param: worker_flavor }
diff --git a/de/heat-templates/fragments/multirack/Rack.yaml b/de/heat-templates/fragments/multirack/Rack.yaml
index 23036b3..71a820e 100644
--- a/de/heat-templates/fragments/multirack/Rack.yaml
+++ b/de/heat-templates/fragments/multirack/Rack.yaml
@@ -44,6 +44,8 @@
type: string
qos_policy_name:
type: string
+ kubernetes_installer:
+ type: string
resources:
@@ -81,6 +83,7 @@
docker_ee_release: { get_param: docker_ee_release }
docker_ucp_image: { get_param: docker_ucp_image}
node_type: "worker"
+ kubernetes_installer: { get_param: kubernetes_installer }
key_name: { get_param: key_name }
image: { get_param: image }
flavor: { get_param: cmp_flavor }
diff --git a/de/heat-templates/fragments/multirack/SrvInstancesVM.yaml b/de/heat-templates/fragments/multirack/SrvInstancesVM.yaml
index 5879d30..54cf445 100644
--- a/de/heat-templates/fragments/multirack/SrvInstancesVM.yaml
+++ b/de/heat-templates/fragments/multirack/SrvInstancesVM.yaml
@@ -7,6 +7,8 @@
default: {}
node_type:
type: string
+ kubernetes_installer:
+ type: string
key_name:
type: string
description: Name of keypair to assign to servers
@@ -66,6 +68,7 @@
template: { get_file: ../../scripts/instance_boot.sh }
params:
$node_type: { get_param: node_type }
+ $kubernetes_installer: { get_param: kubernetes_installer }
$wait_condition_notify: { get_attr: [ wait_handle, curl_cli ] }
$docker_ee_url: { get_param: docker_ee_url }
$docker_ee_release: { get_param: docker_ee_release }
diff --git a/de/heat-templates/fragments/multirack/SrvInstancesVMCeph.yaml b/de/heat-templates/fragments/multirack/SrvInstancesVMCeph.yaml
index 6a0997b..deb88ea 100644
--- a/de/heat-templates/fragments/multirack/SrvInstancesVMCeph.yaml
+++ b/de/heat-templates/fragments/multirack/SrvInstancesVMCeph.yaml
@@ -7,6 +7,8 @@
default: {}
node_type:
type: string
+ kubernetes_installer:
+ type: string
key_name:
type: string
description: Name of keypair to assign to servers
@@ -95,6 +97,7 @@
template: { get_file: ../../scripts/instance_boot.sh }
params:
$node_type: { get_param: node_type }
+ $kubernetes_installer: { get_param: kubernetes_installer }
$wait_condition_notify: { get_attr: [ wait_handle, curl_cli ] }
$docker_ee_url: { get_param: docker_ee_url }
$docker_ee_release: { get_param: docker_ee_release }
diff --git a/de/heat-templates/fragments/multirack/SrvInstancesVMRack.yaml b/de/heat-templates/fragments/multirack/SrvInstancesVMRack.yaml
index 783eb61..05a6db3 100644
--- a/de/heat-templates/fragments/multirack/SrvInstancesVMRack.yaml
+++ b/de/heat-templates/fragments/multirack/SrvInstancesVMRack.yaml
@@ -7,6 +7,8 @@
default: {}
node_type:
type: string
+ kubernetes_installer:
+ type: string
key_name:
type: string
description: Name of keypair to assign to servers
@@ -60,6 +62,7 @@
template: { get_file: ../../scripts/instance_boot.sh }
params:
$node_type: { get_param: node_type }
+ $kubernetes_installer: { get_param: kubernetes_installer }
$wait_condition_notify: { get_attr: [ wait_handle, curl_cli ] }
$docker_ee_url: { get_param: docker_ee_url }
$docker_ee_release: { get_param: docker_ee_release }
diff --git a/de/heat-templates/scripts/instance_boot.sh b/de/heat-templates/scripts/instance_boot.sh
index becccfe..483afe2 100644
--- a/de/heat-templates/scripts/instance_boot.sh
+++ b/de/heat-templates/scripts/instance_boot.sh
@@ -43,6 +43,7 @@
TUNNEL_INTERFACE=$(ip -o addr show |grep -w ${TUNNEL_INTERFACE_IP}/${TUNNEL_INTERFACE_NETWORK_NETMASK} | awk '{print $2}')
NODE_TYPE=${NODE_TYPE:-$node_type}
+KUBERNETES_INSTALLER=${KUBERNETES_INSTALLER:-$kubernetes_installer}
UCP_MASTER_HOST=${UCP_MASTER_HOST:-$ucp_master_host}
NODE_METADATA=${NODE_METADATA:-'$node_metadata'}
DOCKER_EE_URL=${DOCKER_EE_URL:-$docker_ee_url}
@@ -138,6 +139,8 @@
LVM_LOOP_DEVICE_SIZE=${LVM_LOOP_DEVICE_SIZE:-0}
CINDER_LVM_LOOP_DEVICE_SIZE=${CINDER_LVM_LOOP_DEVICE_SIZE:-0}
+_KUBECTL_CMD="kubectl --kubeconfig /etc/kubernetes/admin.conf "
+
function retry {
local retries=$1
shift
@@ -331,6 +334,47 @@
fi
}
+function download_k0s {
+ curl -sSLf https://get.k0s.sh | sudo sh
+}
+
+function install_k0s {
+ k0s install controller --single
+ k0s start
+
+ sleep 10
+
+ function _retry_wait_k0s {
+ k0s kubectl get nodes |grep -w Ready |awk '{print $1}' |grep -q "$(hostname)"
+ }
+ retry $NODE_DEPLOYMENT_RETRIES "The node didn't come up." _retry_wait_k0s
+
+ mkdir -p /etc/kubernetes/
+ mkdir -p /root/.kube/
+ k0s kubeconfig admin > /etc/kubernetes/admin.conf
+ cp /etc/kubernetes/admin.conf /root/.kube/config
+
+ mkdir -p /etc/k0s
+ k0s kubeconfig admin > /etc/k0s/admin.conf
+ k0s token create --role=worker > /etc/k0s/worker_token.yaml
+ k0s token create --role=controller > /etc/k0s/controller_token.yaml
+
+ DEBIAN_FRONTEND=noninteractive apt-get install -y nginx
+ rm -f /etc/nginx/sites-enabled/default
+ ln -s /etc/k0s/ /var/www/k0s
+ cat << EOF > /etc/nginx/sites-enabled/k0s
+server {
+ listen *:80;
+ root /var/www;
+
+ location /k0s {
+ autoindex on;
+ }
+}
+EOF
+ systemctl restart nginx
+}
+
function get_authtoken_retry {
# Download the bundle https://docs.docker.com/ee/ucp/user-access/cli/
# Create an environment variable with the user security token
@@ -366,11 +410,22 @@
function wait_for_node {
function retry_wait {
- kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes |grep -w Ready |awk '{print $1}' |grep -q $(hostname)
+ ${_KUBECTL_CMD} get nodes |grep -w Ready |awk '{print $1}' |grep -q $(hostname)
}
retry $NODE_DEPLOYMENT_RETRIES "The node didn't come up." retry_wait
}
+function download_k8s_metadata {
+ mkdir -p /etc/k0s/
+ mkdir -p /etc/kubernetes
+ for f in worker_token.yaml controller_token.yaml admin.conf; do
+ curl -f --retry 6 --retry-delay 5 -L http://${UCP_MASTER_HOST}/k0s/${f} -o /etc/k0s/${f}
+ done
+ cp /etc/k0s/admin.conf /etc/kubernetes/admin.conf
+ mkdir -p /root/.kube/
+ cp /etc/k0s/admin.conf /root/.kube/config
+}
+
function join_node {
if kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes |grep -w Ready |awk '{print $1}' |grep -q $(hostname); then
echo "This node is already join"
@@ -383,6 +438,11 @@
fi
}
+function join_k0s_node {
+ k0s install worker --token-file /etc/k0s/worker_token.yaml
+ k0s start
+}
+
function create_ucp_config {
if [[ "${SINGLE_NODE}" == true ]]; then
max_pods="kubelet_max_pods = 220"
@@ -469,7 +529,7 @@
systemctl restart systemd-resolved
# Make sure local hostname is present in /etc/hosts
- sed -i "s/127.0.0.1 localhost/127.0.0.1 localhost\n${CONTROL_IP_ADDRESS} $(hostname)/" /etc/hosts
+ sed -i "s/127.0.0.1 localhost/127.0.0.1 localhost\n${CONTROL_IP_ADDRESS} $(hostname -s).cluster.local $(hostname -s)/" /etc/hosts
}
function workaround_default_forward_policy {
@@ -691,7 +751,7 @@
function set_node_labels {
function set_node_labels_retry {
- kubectl patch node $(hostname) -p "{\"metadata\": $(echo $NODE_METADATA | jq -c ".")}"
+ ${_KUBECTL_CMD} patch node $(hostname) -p "{\"metadata\": $(echo $NODE_METADATA | jq -c ".")}"
}
retry 10 "Labeling node failed" set_node_labels_retry
}
@@ -706,7 +766,7 @@
function collect_ceph_metadata {
local ceph_osd_node
- ceph_osd_node=$(kubectl get nodes -l role=ceph-osd-node -o jsonpath={.items[?\(@.metadata.name==\"$(hostname)\"\)].metadata.name})
+ ceph_osd_node=$(${_KUBECTL_CMD} get nodes -l role=ceph-osd-node -o jsonpath={.items[?\(@.metadata.name==\"$(hostname)\"\)].metadata.name})
if [[ -f /usr/share/metadata/ceph.yaml && ${ceph_osd_node} ]]; then
HW_METADATA="{\"ceph\": {\"$(hostname)\": \"$(base64 -w 0 /usr/share/metadata/ceph.yaml)\"}}"
@@ -928,24 +988,33 @@
disable_rp_filter
network_config
prepare_network
- prepare_docker_config
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ prepare_docker_config
+ fi
install_required_packages
configure_ntp
configure_atop
workaround_default_forward_policy
- install_docker
- swarm_init
- create_ucp_config
- cache_images
- install_ucp
- download_bundles
- rm_ucp_config
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ install_docker
+ swarm_init
+ create_ucp_config
+ cache_images
+ install_ucp
+ download_bundles
+ rm_ucp_config
+ elif [[ "${KUBERNETES_INSTALLER}" == "k0s" ]]; then
+ download_k0s
+ install_k0s
+ fi
install_kubectl
wait_for_node
set_node_labels
collect_ceph_metadata
configure_contrack
- disable_iptables_for_bridges
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ disable_iptables_for_bridges
+ fi
if [[ "${SINGLE_NODE}" == true ]]; then
nested_virt_config
disable_master_taint
@@ -958,21 +1027,27 @@
disable_rp_filter
network_config
prepare_network
- prepare_docker_config
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ prepare_docker_config
+ fi
install_required_packages
configure_ntp
configure_atop
workaround_default_forward_policy
- install_docker
- cache_images
- download_bundles
- join_node manager
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ install_docker
+ cache_images
+ download_bundles
+ join_node manager
+ fi
install_kubectl
wait_for_node
set_node_labels
collect_ceph_metadata
configure_contrack
- disable_iptables_for_bridges
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ disable_iptables_for_bridges
+ fi
collect_interfaces_metadata
;;
worker)
@@ -984,22 +1059,32 @@
disable_rp_filter
network_config
prepare_network
- prepare_docker_config
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ prepare_docker_config
+ fi
install_required_packages
enable_iscsi
configure_ntp
configure_atop
workaround_default_forward_policy
- install_docker
- cache_images
- download_bundles
- join_node worker
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ install_docker
+ cache_images
+ download_bundles
+ join_node worker
+ elif [[ "${KUBERNETES_INSTALLER}" == "k0s" ]]; then
+ download_k0s
+ download_k8s_metadata
+ join_k0s_node worker
+ fi
install_kubectl
wait_for_node
set_node_labels
collect_ceph_metadata
configure_contrack
- disable_iptables_for_bridges
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ disable_iptables_for_bridges
+ fi
collect_interfaces_metadata
configure_lvm
;;
@@ -1008,29 +1093,39 @@
disable_rp_filter
network_config
prepare_network
- prepare_docker_config
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ prepare_docker_config
+ fi
install_required_packages
configure_ntp
configure_atop
- install_docker
- cache_images
- download_bundles
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ install_docker
+ cache_images
+ download_bundles
+ fi
workaround_default_forward_policy
configure_contrack
- disable_iptables_for_bridges
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ disable_iptables_for_bridges
+ fi
;;
frr)
prepare_metadata_files
disable_rp_filter
network_config
prepare_network
- prepare_docker_config
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ prepare_docker_config
+ fi
install_required_packages
configure_ntp
configure_atop
- install_docker
- cache_images
- download_bundles
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ install_docker
+ cache_images
+ download_bundles
+ fi
workaround_default_forward_policy
configure_contrack
disable_iptables_for_bridges
diff --git a/de/heat-templates/top.yaml b/de/heat-templates/top.yaml
index d3949b6..afc0d85 100644
--- a/de/heat-templates/top.yaml
+++ b/de/heat-templates/top.yaml
@@ -327,6 +327,9 @@
The size of loop device for advanced computes to configure LVM on, in gigabytes
type: number
default: 0
+ kubernetes_installer:
+ type: string
+ default: "ucp"
conditions:
aio_deploy:
@@ -405,6 +408,7 @@
docker_ucp_swarm_data_port: { get_param: docker_ucp_swarm_data_port }
docker_default_address_pool: { get_param: docker_default_address_pool }
node_type: "ucp"
+ kubernetes_installer: { get_param: kubernetes_installer }
key_name: { get_attr: [keypair_name, value] }
image: { get_param: image }
flavor: { get_param: ucp_flavor }
@@ -431,6 +435,7 @@
properties:
metadata: { get_param: master_metadata}
node_type: "master"
+ kubernetes_installer: { get_param: kubernetes_installer }
key_name: { get_attr: [keypair_name, value] }
image: { get_param: image }
flavor: { get_param: masters_flavor }
@@ -461,6 +466,7 @@
properties:
metadata: { get_param: worker_metadata}
node_type: "worker"
+ kubernetes_installer: { get_param: kubernetes_installer }
key_name: { get_attr: [keypair_name, value] }
image: { get_param: image }
flavor: { get_param: workers_flavor }
@@ -509,6 +515,7 @@
properties:
metadata: { get_param: cmp_metadata }
node_type: "worker"
+ kubernetes_installer: { get_param: kubernetes_installer }
key_name: { get_attr: [keypair_name, value] }
image: { get_param: image }
flavor: { get_param: cmps_flavor }
@@ -557,6 +564,7 @@
properties:
metadata: { get_param: acmp_metadata }
node_type: "worker"
+ kubernetes_installer: { get_param: kubernetes_installer }
key_name: { get_attr: [keypair_name, value] }
image: { get_param: image }
flavor: { get_param: acmps_flavor }
@@ -606,6 +614,7 @@
properties:
metadata: { get_param: gtw_metadata }
node_type: "worker"
+ kubernetes_installer: { get_param: kubernetes_installer }
key_name: { get_attr: [keypair_name, value] }
image: { get_param: image }
flavor: { get_param: gtws_flavor }
@@ -645,6 +654,7 @@
properties:
metadata: { get_param: lma_metadata }
node_type: "worker"
+ kubernetes_installer: { get_param: kubernetes_installer }
key_name: { get_attr: [keypair_name, value] }
image: { get_param: image }
flavor: { get_param: lmas_flavor }
@@ -684,6 +694,7 @@
properties:
metadata: { get_param: osd_metadata }
node_type: "worker"
+ kubernetes_installer: { get_param: kubernetes_installer }
key_name: { get_attr: [keypair_name, value] }
image: { get_param: image }
flavor: { get_param: osds_flavor }
@@ -730,6 +741,7 @@
properties:
metadata: { get_param: frr_metadata }
node_type: "frr"
+ kubernetes_installer: { get_param: kubernetes_installer }
key_name: { get_attr: [keypair_name, value] }
image: { get_param: image }
flavor: { get_param: frrs_flavor }
@@ -777,6 +789,7 @@
properties:
metadata: { get_param: spare_metadata }
node_type: "spare"
+ kubernetes_installer: { get_param: kubernetes_installer }
key_name: { get_attr: [keypair_name, value] }
image: { get_param: image }
flavor: { get_param: spares_flavor }
@@ -823,6 +836,7 @@
properties:
metadata: { get_param: ntw_metadata }
node_type: "worker"
+ kubernetes_installer: { get_param: kubernetes_installer }
key_name: { get_attr: [keypair_name, value] }
image: { get_param: image }
flavor: { get_param: ntws_flavor }
@@ -869,6 +883,7 @@
properties:
metadata: { get_param: vbmc_metadata}
node_type: "worker"
+ kubernetes_installer: { get_param: kubernetes_installer }
key_name: { get_attr: [keypair_name, value] }
image: { get_param: image }
flavor: { get_param: vbmcs_flavor }