Rework install script to use functions from gerrit
To avoid exceeding the configdrive size limit, move the install
functions out of the boot script into functions.sh, which is fetched
from gerrit at boot time. The new launch.sh (now used as the user-data
template) clones the devops-utils repo, checks out the refspec given
by the new devops_utils_refspec parameter (default: master) and
sources functions.sh as a library.
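
A minimal usage sketch (the stack name and refspec value below are
hypothetical; the parameter name and template path come from this
change), overriding the library refspec when creating a stack:

    openstack stack create -t de/heat-templates/top.yaml \
        --parameter devops_utils_refspec=refs/changes/01/12345/2 \
        mystack

When the parameter is omitted, launch.sh falls back to the 'master'
branch of the devops-utils repo.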
Related-Prod: PRODX-3456
Change-Id: I38c8dbff5818fd3362e653ad8b1caa913580d1ec
diff --git a/de/heat-templates/fragments/SrvInstancesBM.yaml b/de/heat-templates/fragments/SrvInstancesBM.yaml
index 06f47c1..3cb80ee 100644
--- a/de/heat-templates/fragments/SrvInstancesBM.yaml
+++ b/de/heat-templates/fragments/SrvInstancesBM.yaml
@@ -72,7 +72,7 @@
group: ungrouped
config:
str_replace:
- template: { get_file: ../scripts/instance_boot.sh }
+ template: { get_file: ../scripts/launch.sh }
params:
$node_type: { get_param: node_type }
$kubernetes_installer: { get_param: kubernetes_installer }
diff --git a/de/heat-templates/fragments/SrvInstancesBMCeph.yaml b/de/heat-templates/fragments/SrvInstancesBMCeph.yaml
index 7c88e3d..ad2a702 100644
--- a/de/heat-templates/fragments/SrvInstancesBMCeph.yaml
+++ b/de/heat-templates/fragments/SrvInstancesBMCeph.yaml
@@ -89,7 +89,7 @@
group: ungrouped
config:
str_replace:
- template: { get_file: ../scripts/instance_boot.sh }
+ template: { get_file: ../scripts/launch.sh }
params:
$node_type: { get_param: node_type }
$kubernetes_installer: { get_param: kubernetes_installer }
diff --git a/de/heat-templates/fragments/SrvInstancesBMCephOSD.yaml b/de/heat-templates/fragments/SrvInstancesBMCephOSD.yaml
index f2f4cd2..0c92f7a 100644
--- a/de/heat-templates/fragments/SrvInstancesBMCephOSD.yaml
+++ b/de/heat-templates/fragments/SrvInstancesBMCephOSD.yaml
@@ -110,7 +110,7 @@
group: ungrouped
config:
str_replace:
- template: { get_file: ../scripts/instance_boot.sh }
+ template: { get_file: ../scripts/launch.sh }
params:
$node_type: { get_param: node_type }
$kubernetes_installer: { get_param: kubernetes_installer }
diff --git a/de/heat-templates/fragments/SrvInstancesVM.yaml b/de/heat-templates/fragments/SrvInstancesVM.yaml
index cb37946..374ec92 100644
--- a/de/heat-templates/fragments/SrvInstancesVM.yaml
+++ b/de/heat-templates/fragments/SrvInstancesVM.yaml
@@ -82,6 +82,9 @@
kubectl_version:
type: string
default: 1.18.8
+ devops_utils_refspec:
+ type: string
+ default: 'master'
resources:
@@ -91,7 +94,7 @@
group: ungrouped
config:
str_replace:
- template: { get_file: ../scripts/instance_boot.sh }
+ template: { get_file: ../scripts/launch.sh }
params:
$node_type: { get_param: node_type }
$kubernetes_installer: { get_param: kubernetes_installer }
@@ -113,6 +116,7 @@
$single_node: { get_param: single_node }
$secure_overlay_enabled: { get_param: secure_overlay_enabled }
$kubectl_version: { get_param: kubectl_version }
+ $devops_utils_refspec: { get_param: devops_utils_refspec }
inject_files:
type: "OS::Heat::CloudConfig"
diff --git a/de/heat-templates/fragments/SrvInstancesVMCeph.yaml b/de/heat-templates/fragments/SrvInstancesVMCeph.yaml
index 0e3a99d..320b564 100644
--- a/de/heat-templates/fragments/SrvInstancesVMCeph.yaml
+++ b/de/heat-templates/fragments/SrvInstancesVMCeph.yaml
@@ -91,6 +91,9 @@
kubectl_version:
type: string
default: 1.18.8
+ devops_utils_refspec:
+ type: string
+ default: 'master'
resources:
@@ -100,7 +103,7 @@
group: ungrouped
config:
str_replace:
- template: { get_file: ../scripts/instance_boot.sh }
+ template: { get_file: ../scripts/launch.sh }
params:
$node_type: { get_param: node_type }
$kubernetes_installer: { get_param: kubernetes_installer }
@@ -126,6 +129,7 @@
$ironic_baremetal_tunnel_cidr: { get_param: ironic_baremetal_tunnel_cidr }
$ironic_mt_enabled: { get_param: ironic_mt_enabled }
$kubectl_version: { get_param: kubectl_version }
+ $devops_utils_refspec: { get_param: devops_utils_refspec }
inject_files:
type: "OS::Heat::CloudConfig"
diff --git a/de/heat-templates/fragments/SrvInstancesVMCephOSD.yaml b/de/heat-templates/fragments/SrvInstancesVMCephOSD.yaml
index cc0e841..dff89f5 100644
--- a/de/heat-templates/fragments/SrvInstancesVMCephOSD.yaml
+++ b/de/heat-templates/fragments/SrvInstancesVMCephOSD.yaml
@@ -124,6 +124,9 @@
kubectl_version:
type: string
default: 1.18.8
+ devops_utils_refspec:
+ type: string
+ default: 'master'
resources:
@@ -133,7 +136,7 @@
group: ungrouped
config:
str_replace:
- template: { get_file: ../scripts/instance_boot.sh }
+ template: { get_file: ../scripts/launch.sh }
params:
$node_type: { get_param: node_type }
$kubernetes_installer: { get_param: kubernetes_installer }
@@ -168,6 +171,7 @@
$lvm_loop_device_size: { get_param: lvm_loop_device_size }
$cinder_lvm_loop_device_size: { get_param: cinder_lvm_loop_device_size }
$kubectl_version: { get_param: kubectl_version }
+ $devops_utils_refspec: { get_param: devops_utils_refspec }
inject_files:
type: "OS::Heat::CloudConfig"
diff --git a/de/heat-templates/fragments/multirack/SrvInstancesVM.yaml b/de/heat-templates/fragments/multirack/SrvInstancesVM.yaml
index cdaa49e..dc437b2 100644
--- a/de/heat-templates/fragments/multirack/SrvInstancesVM.yaml
+++ b/de/heat-templates/fragments/multirack/SrvInstancesVM.yaml
@@ -67,7 +67,7 @@
group: ungrouped
config:
str_replace:
- template: { get_file: ../../scripts/instance_boot.sh }
+ template: { get_file: ../../scripts/launch.sh }
params:
$node_type: { get_param: node_type }
$kubernetes_installer: { get_param: kubernetes_installer }
diff --git a/de/heat-templates/fragments/multirack/SrvInstancesVMCeph.yaml b/de/heat-templates/fragments/multirack/SrvInstancesVMCeph.yaml
index 5adf1f7..288d03c 100644
--- a/de/heat-templates/fragments/multirack/SrvInstancesVMCeph.yaml
+++ b/de/heat-templates/fragments/multirack/SrvInstancesVMCeph.yaml
@@ -96,7 +96,7 @@
group: ungrouped
config:
str_replace:
- template: { get_file: ../../scripts/instance_boot.sh }
+ template: { get_file: ../../scripts/launch.sh }
params:
$node_type: { get_param: node_type }
$kubernetes_installer: { get_param: kubernetes_installer }
diff --git a/de/heat-templates/fragments/multirack/SrvInstancesVMRack.yaml b/de/heat-templates/fragments/multirack/SrvInstancesVMRack.yaml
index 9e09613..8efb30a 100644
--- a/de/heat-templates/fragments/multirack/SrvInstancesVMRack.yaml
+++ b/de/heat-templates/fragments/multirack/SrvInstancesVMRack.yaml
@@ -61,7 +61,7 @@
group: ungrouped
config:
str_replace:
- template: { get_file: ../../scripts/instance_boot.sh }
+ template: { get_file: ../../scripts/launch.sh }
params:
$node_type: { get_param: node_type }
$kubernetes_installer: { get_param: kubernetes_installer }
diff --git a/de/heat-templates/scripts/instance_boot.sh b/de/heat-templates/scripts/functions.sh
similarity index 77%
rename from de/heat-templates/scripts/instance_boot.sh
rename to de/heat-templates/scripts/functions.sh
index 3c3431e..404c0bc 100644
--- a/de/heat-templates/scripts/instance_boot.sh
+++ b/de/heat-templates/scripts/functions.sh
@@ -5,64 +5,20 @@
set -a
# ensure we don't re-source this in the same environment
-[[ -z "$_INSTALL_SCRIPT" ]] || return 0
-declare -r -g _INSTALL_SCRIPT=1
+[[ -z "$_FUNCTIONS_SCRIPT" ]] || return 0
+declare -r -g _FUNCTIONS_SCRIPT=1
-#
-# Variables in this block are passed from heat template
-#
-CONTROL_NETWORK_CIDR=${CONTROL_NETWORK_CIDR:-$control_network_cidr}
-PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$private_floating_interface}
-PUBLIC_INTERFACE_IP=${PUBLIC_INTERFACE_IP:-$private_floating_interface_ip}
-PUBLIC_INTERFACE_CIDR=${PUBLIC_INTERFACE_CIDR:-$private_floating_network_cidr}
PUBLIC_INTERFACE_NETMASK=$(echo ${PUBLIC_INTERFACE_CIDR} | cut -d'/' -f2)
-DEFAULT_INTERFACE=${DEFAULT_INTERFACE:-$default_interface}
-STORAGE_BACKEND_INTERFACE=${STORAGE_BACKEND_INTERFACE:-$storage_backend_interface}
-STORAGE_BACKEND_INTERFACE_IP=${STORAGE_BACKEND_INTERFACE_IP:-$storage_backend_network_interface_ip}
-STORAGE_BACKEND_NETWORK=${STORAGE_BACKEND_NETWORK:-$storage_backend_network_cidr}
STORAGE_BACKEND_NETWORK_NETMASK=$(echo ${STORAGE_BACKEND_NETWORK} | cut -d'/' -f2)
-STORAGE_FRONTEND_INTERFACE=${STORAGE_FRONTEND_INTERFACE:-$storage_frontend_interface}
-STORAGE_FRONTEND_INTERFACE_IP=${STORAGE_FRONTEND_INTERFACE_IP:-$storage_frontend_network_interface_ip}
-STORAGE_FRONTEND_NETWORK=${STORAGE_FRONTEND_NETWORK:-$storage_frontend_network_cidr}
STORAGE_FRONTEND_NETWORK_NETMASK=$(echo ${STORAGE_FRONTEND_NETWORK} | cut -d'/' -f2)
-
-IRONIC_BAREMETAL_NETWORK=${IRONIC_BAREMETAL_NETWORK:-$ironic_baremetal_network_cidr}
IRONIC_BAREMETAL_INTERFACE_IP=${IRONIC_BAREMETAL_INTERFACE_IP:-$ironic_baremetal_interface_ip}
IRONIC_BAREMETAL_NETWORK_NETMASK=$(echo ${IRONIC_BAREMETAL_NETWORK} | cut -d'/' -f2)
IRONIC_BAREMETAL_INTERFACE=$(ip route |grep ${IRONIC_BAREMETAL_NETWORK} | awk '/ src / {print $3}')
-IRONIC_BAREMETAL_TUNNEL_NETWORK=${IRONIC_BAREMETAL_TUNNEL_NETWORK:-$ironic_baremetal_tunnel_cidr}
-TUNNEL_INTERFACE_IP=${TUNNEL_INTERFACE_IP:-$tunnel_interface_ip}
-FRR_BGP_NEIGHBORS=${FRR_BGP_NEIGHBORS:-$frr_bgp_neighbors}
-FRR_EVPN_TUNNELS_RANGE=${FRR_EVPN_TUNNELS_RANGE:-$frr_evpn_tunnels_range}
-FRR_EVPN_VXLAN_DST_PORT=${FRR_EVPN_VXLAN_DST_PORT:-$frr_evpn_vxlan_dst_port}
TUNNEL_INTERFACE_NETWORK_NETMASK=$(ip -o addr show |grep -w ${TUNNEL_INTERFACE_IP} | awk '{print $4}' |awk -F '/' '{print $2}')
TUNNEL_INTERFACE=$(ip -o addr show |grep -w ${TUNNEL_INTERFACE_IP}/${TUNNEL_INTERFACE_NETWORK_NETMASK} | awk '{print $2}')
-
-NODE_TYPE=${NODE_TYPE:-$node_type}
-KUBERNETES_INSTALLER=${KUBERNETES_INSTALLER:-$kubernetes_installer}
-UCP_MASTER_HOST=${UCP_MASTER_HOST:-$ucp_master_host}
-NODE_METADATA=${NODE_METADATA:-'$node_metadata'}
-DOCKER_EE_URL=${DOCKER_EE_URL:-$docker_ee_url}
-DOCKER_EE_RELEASE=${DOCKER_EE_RELEASE:-$docker_ee_release}
-DOCKER_EE_PACKAGES=${DOCKER_EE_PACKAGES:-$docker_ee_packages}
-DOCKER_UCP_IMAGE=${DOCKER_UCP_IMAGE:-$docker_ucp_image}
-BINARY_BASE_URL=${BINARY_BASE_URL:-$binary_base_url}
-UCP_DOCKER_SWARM_DATA_PORT=${UCP_DOCKER_SWARM_DATA_PORT:-$docker_ucp_swarm_data_port}
-FLOATING_NETWORK_PREFIXES=${FLOATING_NETWORK_PREFIXES:-$private_floating_network_cidr}
-IRONIC_MT_ENABLED=${IRONIC_MT_ENABLED:-$ironic_mt_enabled}
IRONIC_BAREMETAL_NETWORK_PREFIX=$(sed 's/[0-9]*\/[0-9]*$//' <<< $IRONIC_BAREMETAL_NETWORK)
IRONIC_BAREMETAL_TUNNEL_NETWORK_PREFIX=$(sed 's/[0-9]*\/[0-9]*$//' <<< $IRONIC_BAREMETAL_TUNNEL_NETWORK)
-
-HUGE_PAGES=${HUGE_PAGES:-$huge_pages}
-TUNGSTENFABRIC_ENABLED=${TUNGSTENFABRIC_ENABLED:-$tungstenfabric_enabled}
-SINGLE_NODE=${SINGLE_NODE:-$single_node}
-DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL:-$docker_default_address_pool}
-LVM_LOOP_DEVICE_SIZE=${LVM_LOOP_DEVICE_SIZE:-$lvm_loop_device_size}
-CINDER_LVM_LOOP_DEVICE_SIZE=${CINDER_LVM_LOOP_DEVICE_SIZE:-$cinder_lvm_loop_device_size}
-SECURE_OVERLAY_ENABLED=${SECURE_OVERLAY_ENABLED:-$secure_overlay_enabled}
-#
-# End of block
-#
+STORAGE_FRONTEND_NETWORK_NETMASK=$(echo ${STORAGE_FRONTEND_NETWORK} | cut -d'/' -f2)
DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL:-10.10.1.0/16}
# DOCKER_DEFAULT_ADDRESS_SIZE have to be less then netmask in DOCKER_DEFAULT_ADDRESS_POOL because
# to the fact that actual netmask for docker_gwbridge is given from it
@@ -97,7 +53,6 @@
UCP_USERNAME=${UCP_USERNAME:-admin}
UCP_PASSWORD=${UCP_PASSWORD:-administrator}
OS_CODENAME=$(lsb_release -c -s)
-KUBECTL_VERSION=${KUBECTL_VERSION:-$kubectl_version}
NODE_DEPLOYMENT_RETRIES=${NODE_DEPLOYMENT_RETRIES:-15}
FLOATING_NETWORK_PREFIXES=${FLOATING_NETWORK_PREFIXES:-10.11.12.0/24}
PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-ens4}
@@ -184,30 +139,6 @@
mount -a
}
-function wait_condition_send {
- local status=${1:-SUCCESS}
- local reason=${2:-\"empty\"}
- local data=${3:-\"empty\"}
- local data_binary="{\"status\": \"$status\", \"reason\": \"$reason\", \"data\": $data}"
- echo "Trying to send signal to wait condition 5 times: $data_binary"
- WAIT_CONDITION_NOTIFY_EXIT_CODE=2
- i=0
- while (( ${WAIT_CONDITION_NOTIFY_EXIT_CODE} != 0 && ${i} < 5 )); do
- $wait_condition_notify -k --data-binary "$data_binary" && WAIT_CONDITION_NOTIFY_EXIT_CODE=0 || WAIT_CONDITION_NOTIFY_EXIT_CODE=2
- i=$((i + 1))
- sleep 1
- done
- if (( ${WAIT_CONDITION_NOTIFY_EXIT_CODE} !=0 && "${status}" == "SUCCESS" ))
- then
- status="FAILURE"
- reason="Can't reach metadata service to report about SUCCESS."
- fi
- if [ "$status" == "FAILURE" ]; then
- exit 1
- fi
-}
-
-
function configure_atop {
sed -i 's/INTERVAL=600/INTERVAL=60/' /usr/share/atop/atop.daily
systemctl restart atop
@@ -1034,163 +965,3 @@
EOF
udevadm control --reload-rules && udevadm trigger
}
-
-# Exit on any errors
-function handle_exit {
- if [ $? != 0 ] ; then
- wait_condition_send "FAILURE" "Script terminated with an error."
- fi
-}
-trap handle_exit EXIT
-
-if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
- case "$NODE_TYPE" in
- # Please keep the "prepare_metadata_files", "disable-rp-filter", "network_config" and "prepare_network" functions
- # at the very beginning in the same order.
- ucp)
- setup_bind_mounts
- wait_for_external_network
- prepare_metadata_files
- disable_rp_filter
- network_config
- prepare_network
- if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
- prepare_docker_config
- fi
- install_required_packages
- install_kubectl
- configure_ntp
- configure_atop
- workaround_default_forward_policy
- if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
- install_docker
- swarm_init
- create_ucp_config
- cache_images
- install_ucp
- download_bundles
- rm_ucp_config
- elif [[ "${KUBERNETES_INSTALLER}" == "k0s" ]]; then
- download_k0s
- install_k0s
- fi
- if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
- wait_for_node
- set_node_labels
- collect_ceph_metadata
- configure_contrack
- disable_iptables_for_bridges
- fi
- if [[ "${SINGLE_NODE}" == true ]]; then
- nested_virt_config
- disable_master_taint
- collect_interfaces_metadata
- fi
- cron_disable_calico_offloading
- ;;
- master)
- setup_bind_mounts
- wait_for_external_network
- nested_virt_config
- prepare_metadata_files
- disable_rp_filter
- network_config
- prepare_network
- if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
- prepare_docker_config
- fi
- install_required_packages
- install_kubectl
- configure_ntp
- configure_atop
- workaround_default_forward_policy
- if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
- install_docker
- cache_images
- download_bundles
- join_node manager
- fi
- wait_for_node
- set_node_labels
- collect_ceph_metadata
- configure_contrack
- if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
- disable_iptables_for_bridges
- fi
- collect_interfaces_metadata
- cron_disable_calico_offloading
- increase_iscsi_timeout
- ;;
- worker)
- setup_bind_mounts
- wait_for_external_network
- if [[ "${CONFIGURE_HUGE_PAGES}" == true ]]; then
- configure_huge_pages
- fi
- nested_virt_config
- prepare_metadata_files
- disable_rp_filter
- network_config
- prepare_network
- if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
- prepare_docker_config
- fi
- install_required_packages
- install_kubectl
- enable_iscsi
- configure_ntp
- configure_atop
- workaround_default_forward_policy
- if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
- install_docker
- cache_images
- download_bundles
- join_node worker
- elif [[ "${KUBERNETES_INSTALLER}" == "k0s" ]]; then
- download_k0s
- download_k8s_metadata
- join_k0s_node worker
- fi
- wait_for_node
- set_node_labels
- collect_ceph_metadata
- configure_contrack
- if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
- disable_iptables_for_bridges
- fi
- collect_interfaces_metadata
- configure_lvm
- cron_disable_calico_offloading
- increase_iscsi_timeout
- ;;
- frr)
- wait_for_external_network
- prepare_metadata_files
- disable_rp_filter
- network_config
- prepare_network
- if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
- prepare_docker_config
- fi
- install_required_packages
- configure_ntp
- configure_atop
- if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
- install_docker
- cache_images
- download_bundles
- fi
- workaround_default_forward_policy
- configure_contrack
- disable_iptables_for_bridges
- install_frr
- cron_disable_calico_offloading
- increase_iscsi_timeout
- ;;
- *)
- echo "Usage: $0 {ucp|master|worker}"
- exit 1
- esac
-
- wait_condition_send "SUCCESS" "Instance successfuly started." "${HW_METADATA}"
-fi
diff --git a/de/heat-templates/scripts/launch.sh b/de/heat-templates/scripts/launch.sh
new file mode 100644
index 0000000..09c4068
--- /dev/null
+++ b/de/heat-templates/scripts/launch.sh
@@ -0,0 +1,267 @@
+#!/bin/bash
+set -x
+set -e
+# allow access to the local variables from prepare-metadata.py
+set -a
+
+# ensure we don't re-source this in the same environment
+[[ -z "$_INSTALL_SCRIPT" ]] || return 0
+declare -r -g _INSTALL_SCRIPT=1
+
+#
+# Variables in this block are passed from heat template
+#
+CONTROL_NETWORK_CIDR=${CONTROL_NETWORK_CIDR:-$control_network_cidr}
+PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$private_floating_interface}
+PUBLIC_INTERFACE_IP=${PUBLIC_INTERFACE_IP:-$private_floating_interface_ip}
+PUBLIC_INTERFACE_CIDR=${PUBLIC_INTERFACE_CIDR:-$private_floating_network_cidr}
+DEFAULT_INTERFACE=${DEFAULT_INTERFACE:-$default_interface}
+STORAGE_BACKEND_INTERFACE=${STORAGE_BACKEND_INTERFACE:-$storage_backend_interface}
+STORAGE_BACKEND_INTERFACE_IP=${STORAGE_BACKEND_INTERFACE_IP:-$storage_backend_network_interface_ip}
+STORAGE_BACKEND_NETWORK=${STORAGE_BACKEND_NETWORK:-$storage_backend_network_cidr}
+STORAGE_FRONTEND_INTERFACE=${STORAGE_FRONTEND_INTERFACE:-$storage_frontend_interface}
+STORAGE_FRONTEND_INTERFACE_IP=${STORAGE_FRONTEND_INTERFACE_IP:-$storage_frontend_network_interface_ip}
+STORAGE_FRONTEND_NETWORK=${STORAGE_FRONTEND_NETWORK:-$storage_frontend_network_cidr}
+STORAGE_FRONTEND_NETWORK_NETMASK=$(echo ${STORAGE_FRONTEND_NETWORK} | cut -d'/' -f2)
+
+IRONIC_BAREMETAL_NETWORK=${IRONIC_BAREMETAL_NETWORK:-$ironic_baremetal_network_cidr}
+IRONIC_BAREMETAL_INTERFACE_IP=${IRONIC_BAREMETAL_INTERFACE_IP:-$ironic_baremetal_interface_ip}
+IRONIC_BAREMETAL_TUNNEL_NETWORK=${IRONIC_BAREMETAL_TUNNEL_NETWORK:-$ironic_baremetal_tunnel_cidr}
+TUNNEL_INTERFACE_IP=${TUNNEL_INTERFACE_IP:-$tunnel_interface_ip}
+FRR_BGP_NEIGHBORS=${FRR_BGP_NEIGHBORS:-$frr_bgp_neighbors}
+FRR_EVPN_TUNNELS_RANGE=${FRR_EVPN_TUNNELS_RANGE:-$frr_evpn_tunnels_range}
+FRR_EVPN_VXLAN_DST_PORT=${FRR_EVPN_VXLAN_DST_PORT:-$frr_evpn_vxlan_dst_port}
+
+NODE_TYPE=${NODE_TYPE:-$node_type}
+KUBERNETES_INSTALLER=${KUBERNETES_INSTALLER:-$kubernetes_installer}
+UCP_MASTER_HOST=${UCP_MASTER_HOST:-$ucp_master_host}
+NODE_METADATA=${NODE_METADATA:-'$node_metadata'}
+DOCKER_EE_URL=${DOCKER_EE_URL:-$docker_ee_url}
+DOCKER_EE_RELEASE=${DOCKER_EE_RELEASE:-$docker_ee_release}
+DOCKER_EE_PACKAGES=${DOCKER_EE_PACKAGES:-$docker_ee_packages}
+DOCKER_UCP_IMAGE=${DOCKER_UCP_IMAGE:-$docker_ucp_image}
+BINARY_BASE_URL=${BINARY_BASE_URL:-$binary_base_url}
+UCP_DOCKER_SWARM_DATA_PORT=${UCP_DOCKER_SWARM_DATA_PORT:-$docker_ucp_swarm_data_port}
+FLOATING_NETWORK_PREFIXES=${FLOATING_NETWORK_PREFIXES:-$private_floating_network_cidr}
+IRONIC_MT_ENABLED=${IRONIC_MT_ENABLED:-$ironic_mt_enabled}
+
+HUGE_PAGES=${HUGE_PAGES:-$huge_pages}
+TUNGSTENFABRIC_ENABLED=${TUNGSTENFABRIC_ENABLED:-$tungstenfabric_enabled}
+SINGLE_NODE=${SINGLE_NODE:-$single_node}
+DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL:-$docker_default_address_pool}
+LVM_LOOP_DEVICE_SIZE=${LVM_LOOP_DEVICE_SIZE:-$lvm_loop_device_size}
+CINDER_LVM_LOOP_DEVICE_SIZE=${CINDER_LVM_LOOP_DEVICE_SIZE:-$cinder_lvm_loop_device_size}
+SECURE_OVERLAY_ENABLED=${SECURE_OVERLAY_ENABLED:-$secure_overlay_enabled}
+KUBECTL_VERSION=${KUBECTL_VERSION:-$kubectl_version}
+
+DEVOPS_UTILS_REFSPEC=${DEVOPS_UTILS_REFSPEC:-$devops_utils_refspec}
+
+#
+# End of block
+#
+
+DEVOPS_UTILS_REPO=${DEVOPS_UTILS_REPO:-'https://gerrit.mcp.mirantis.com/oscore-tools/devops-utils'}
+DEVOPS_UTILS_REFSPEC=${DEVOPS_UTILS_REFSPEC:-'master'}
+DEVOPS_UTILS_DST=/usr/share/devops-utils
+
+# Wait for the external network to become reachable
+sed -i 's/#DNS=/DNS=4.2.2.1/g' /etc/systemd/resolved.conf
+systemctl restart systemd-resolved
+curl --connect-timeout 10 --retry 6 --retry-delay 10 ${DEVOPS_UTILS_REPO} || (sleep 1; /bin/false)
+
+git clone ${DEVOPS_UTILS_REPO} ${DEVOPS_UTILS_DST}
+pushd ${DEVOPS_UTILS_DST}
+if echo "$DEVOPS_UTILS_REFSPEC" |grep -q "^refs"; then
+ git fetch ${DEVOPS_UTILS_REPO} ${DEVOPS_UTILS_REFSPEC}
+ git checkout FETCH_HEAD
+else
+ git checkout ${DEVOPS_UTILS_REFSPEC}
+fi
+git log --oneline -10
+popd
+
+
+source ${DEVOPS_UTILS_DST}/de/heat-templates/scripts/functions.sh
+
+function wait_condition_send {
+ local status=${1:-SUCCESS}
+ local reason=${2:-\"empty\"}
+ local data=${3:-\"empty\"}
+ local data_binary="{\"status\": \"$status\", \"reason\": \"$reason\", \"data\": $data}"
+ echo "Trying to send signal to wait condition 5 times: $data_binary"
+ WAIT_CONDITION_NOTIFY_EXIT_CODE=2
+ i=0
+ while (( ${WAIT_CONDITION_NOTIFY_EXIT_CODE} != 0 && ${i} < 5 )); do
+ $wait_condition_notify -k --data-binary "$data_binary" && WAIT_CONDITION_NOTIFY_EXIT_CODE=0 || WAIT_CONDITION_NOTIFY_EXIT_CODE=2
+ i=$((i + 1))
+ sleep 1
+ done
+    if (( WAIT_CONDITION_NOTIFY_EXIT_CODE != 0 )) && [[ "${status}" == "SUCCESS" ]]
+ then
+ status="FAILURE"
+ reason="Can't reach metadata service to report about SUCCESS."
+ fi
+ if [ "$status" == "FAILURE" ]; then
+ exit 1
+ fi
+}
+
+# Exit on any errors
+function handle_exit {
+ if [ $? != 0 ] ; then
+ wait_condition_send "FAILURE" "Script terminated with an error."
+ fi
+}
+trap handle_exit EXIT
+
+if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
+ case "$NODE_TYPE" in
+ # Please keep the "prepare_metadata_files", "disable-rp-filter", "network_config" and "prepare_network" functions
+ # at the very beginning in the same order.
+ ucp)
+ setup_bind_mounts
+ wait_for_external_network
+ prepare_metadata_files
+ disable_rp_filter
+ network_config
+ prepare_network
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ prepare_docker_config
+ fi
+ install_required_packages
+ install_kubectl
+ configure_ntp
+ configure_atop
+ workaround_default_forward_policy
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ install_docker
+ swarm_init
+ create_ucp_config
+ cache_images
+ install_ucp
+ download_bundles
+ rm_ucp_config
+ elif [[ "${KUBERNETES_INSTALLER}" == "k0s" ]]; then
+ download_k0s
+ install_k0s
+ fi
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ wait_for_node
+ set_node_labels
+ collect_ceph_metadata
+ configure_contrack
+ disable_iptables_for_bridges
+ fi
+ if [[ "${SINGLE_NODE}" == true ]]; then
+ nested_virt_config
+ disable_master_taint
+ collect_interfaces_metadata
+ fi
+ cron_disable_calico_offloading
+ ;;
+ master)
+ setup_bind_mounts
+ wait_for_external_network
+ nested_virt_config
+ prepare_metadata_files
+ disable_rp_filter
+ network_config
+ prepare_network
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ prepare_docker_config
+ fi
+ install_required_packages
+ install_kubectl
+ configure_ntp
+ configure_atop
+ workaround_default_forward_policy
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ install_docker
+ cache_images
+ download_bundles
+ join_node manager
+ fi
+ wait_for_node
+ set_node_labels
+ collect_ceph_metadata
+ configure_contrack
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ disable_iptables_for_bridges
+ fi
+ collect_interfaces_metadata
+ cron_disable_calico_offloading
+ increase_iscsi_timeout
+ ;;
+ worker)
+ setup_bind_mounts
+ wait_for_external_network
+ if [[ "${CONFIGURE_HUGE_PAGES}" == true ]]; then
+ configure_huge_pages
+ fi
+ nested_virt_config
+ prepare_metadata_files
+ disable_rp_filter
+ network_config
+ prepare_network
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ prepare_docker_config
+ fi
+ install_required_packages
+ install_kubectl
+ enable_iscsi
+ configure_ntp
+ configure_atop
+ workaround_default_forward_policy
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ install_docker
+ cache_images
+ download_bundles
+ join_node worker
+ elif [[ "${KUBERNETES_INSTALLER}" == "k0s" ]]; then
+ download_k0s
+ download_k8s_metadata
+ join_k0s_node worker
+ fi
+ wait_for_node
+ set_node_labels
+ collect_ceph_metadata
+ configure_contrack
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ disable_iptables_for_bridges
+ fi
+ collect_interfaces_metadata
+ configure_lvm
+ cron_disable_calico_offloading
+ increase_iscsi_timeout
+ ;;
+ frr)
+ wait_for_external_network
+ prepare_metadata_files
+ disable_rp_filter
+ network_config
+ prepare_network
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ prepare_docker_config
+ fi
+ install_required_packages
+ configure_ntp
+ configure_atop
+ if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
+ install_docker
+ cache_images
+ download_bundles
+ fi
+ workaround_default_forward_policy
+ configure_contrack
+ disable_iptables_for_bridges
+ install_frr
+ cron_disable_calico_offloading
+ increase_iscsi_timeout
+ ;;
+ *)
+ echo "Usage: $0 {ucp|master|worker}"
+ exit 1
+ esac
+
+ wait_condition_send "SUCCESS" "Instance successfuly started." "${HW_METADATA}"
+fi
diff --git a/de/heat-templates/top.yaml b/de/heat-templates/top.yaml
index dfbb370..e4dccf7 100644
--- a/de/heat-templates/top.yaml
+++ b/de/heat-templates/top.yaml
@@ -401,6 +401,9 @@
vmx_enabled:
type: boolean
default: false
+ devops_utils_refspec:
+ type: string
+ default: 'master'
conditions:
aio_deploy:
@@ -562,6 +565,7 @@
availability_zone: { get_param: availability_zone }
secure_overlay_enabled: { get_param: secure_overlay_enabled }
kubectl_version: { get_param: kubectl_version }
+ devops_utils_refspec: { get_param: devops_utils_refspec }
masters:
type: OS::Heat::ResourceGroup
@@ -597,6 +601,7 @@
hardware_metadata: { get_param: hardware_metadata}
availability_zone: { get_param: availability_zone }
kubectl_version: { get_param: kubectl_version }
+ devops_utils_refspec: { get_param: devops_utils_refspec }
workers:
type: OS::Heat::ResourceGroup
@@ -650,6 +655,7 @@
cinder_lvm_loop_device_size: { get_param: cmp_cinder_lvm_loop_device_size }
availability_zone: { get_param: availability_zone }
kubectl_version: { get_param: kubectl_version }
+ devops_utils_refspec: { get_param: devops_utils_refspec }
cmps:
type: OS::Heat::ResourceGroup
@@ -703,6 +709,7 @@
cinder_lvm_loop_device_size: { get_param: cmp_cinder_lvm_loop_device_size }
availability_zone: { get_param: availability_zone }
kubectl_version: { get_param: kubectl_version }
+ devops_utils_refspec: { get_param: devops_utils_refspec }
acmps:
type: OS::Heat::ResourceGroup
@@ -757,6 +764,7 @@
cinder_lvm_loop_device_size: { get_param: acmp_cinder_lvm_loop_device_size }
availability_zone: { get_param: availability_zone }
kubectl_version: { get_param: kubectl_version }
+ devops_utils_refspec: { get_param: devops_utils_refspec }
gtws:
type: OS::Heat::ResourceGroup
@@ -801,6 +809,7 @@
hardware_metadata: { get_param: hardware_metadata}
availability_zone: { get_param: availability_zone }
kubectl_version: { get_param: kubectl_version }
+ devops_utils_refspec: { get_param: devops_utils_refspec }
lmas:
type: OS::Heat::ResourceGroup
@@ -845,6 +854,7 @@
hardware_metadata: { get_param: hardware_metadata}
availability_zone: { get_param: availability_zone }
kubectl_version: { get_param: kubectl_version }
+ devops_utils_refspec: { get_param: devops_utils_refspec }
osds:
type: OS::Heat::ResourceGroup
@@ -897,6 +907,7 @@
num_volumes: { get_param: volumes_per_osd_instance }
availability_zone: { get_param: availability_zone }
kubectl_version: { get_param: kubectl_version }
+ devops_utils_refspec: { get_param: devops_utils_refspec }
frrs: # spares for osds/cmps
type: OS::Heat::ResourceGroup
@@ -999,6 +1010,7 @@
hardware_metadata: { get_param: hardware_metadata}
availability_zone: { get_param: availability_zone }
kubectl_version: { get_param: kubectl_version }
+ devops_utils_refspec: { get_param: devops_utils_refspec }
vbmcs:
type: OS::Heat::ResourceGroup
@@ -1043,6 +1055,7 @@
hardware_metadata: { get_param: hardware_metadata}
availability_zone: { get_param: availability_zone }
kubectl_version: { get_param: kubectl_version }
+ devops_utils_refspec: { get_param: devops_utils_refspec }
tsrvs:
type: OS::Heat::ResourceGroup
@@ -1087,6 +1100,7 @@
hardware_metadata: { get_param: hardware_metadata}
availability_zone: { get_param: availability_zone }
kubectl_version: { get_param: kubectl_version }
+ devops_utils_refspec: { get_param: devops_utils_refspec }
outputs:
ucp_ips: