Add tools to deploy DE UCP
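
The templates under de/heat-templates deploy a Docker Enterprise UCP
cluster on OpenStack: a UCP seed node, swarm managers and workers, plus
optional compute (cmp) and gateway (gtw) nodes sized via the env files.
A stack can be created along these lines (the stack name and the chosen
env file below are examples only):

    openstack stack create --wait \
        -t de/heat-templates/top.yaml \
        -e de/heat-templates/env/compute.yaml \
        de-ucp
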
Related-Prod: PRODX-2027
Change-Id: I8339518506588cdfddc818fa968c92df1088edc3
diff --git a/de/heat-templates/env/compute.yaml b/de/heat-templates/env/compute.yaml
new file mode 100644
index 0000000..19a04cd
--- /dev/null
+++ b/de/heat-templates/env/compute.yaml
@@ -0,0 +1,14 @@
+parameters:
+ key_name: devcloud
+ image: bionic-server-cloudimg-amd64-20190612
+ flavor: system.compact.openstack.control
+ public_net_id: public
+ masters_size: 2
+ worker_size: 3
+ cmp_size: 2
+ gtw_size: 0
+ ucp_boot_timeout: 1200
+ worker_metadata: {"labels": {"openstack-control-plane":"enabled","openvswitch":"enabled","role":"ceph-osd-node"}}
+ cmp_metadata: {"labels": {"openstack-compute-node":"enabled","openvswitch":"enabled", "role":"ceph-osd-node"}}
+ gtw_metadata: {"labels": {"openvswitch":"enabled"}}
+ cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
diff --git a/de/heat-templates/env/converged.yaml b/de/heat-templates/env/converged.yaml
new file mode 100644
index 0000000..09ed375
--- /dev/null
+++ b/de/heat-templates/env/converged.yaml
@@ -0,0 +1,14 @@
+parameters:
+ key_name: devcloud
+ image: bionic-server-cloudimg-amd64-20190612
+ flavor: system.compact.openstack.control
+ public_net_id: public
+ masters_size: 2
+ worker_size: 3
+ cmp_size: 0
+ gtw_size: 0
+ ucp_boot_timeout: 1200
+ worker_metadata: {"labels": {"openstack-control-plane":"enabled","openstack-compute-node":"enabled","openvswitch":"enabled","role":"ceph-osd-node"}}
+ cmp_metadata: {"labels": {"openstack-compute-node":"enabled","openvswitch":"enabled", "role":"ceph-osd-node"}}
+ gtw_metadata: {"labels": {"openvswitch":"enabled"}}
+ cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
diff --git a/de/heat-templates/env/telco.yaml b/de/heat-templates/env/telco.yaml
new file mode 100644
index 0000000..612083a
--- /dev/null
+++ b/de/heat-templates/env/telco.yaml
@@ -0,0 +1,14 @@
+parameters:
+ key_name: devcloud
+ image: bionic-server-cloudimg-amd64-20190612
+ flavor: system.compact.openstack.control
+ public_net_id: public
+ masters_size: 2
+ worker_size: 3
+ cmp_size: 2
+ gtw_size: 2
+ ucp_boot_timeout: 1200
+ worker_metadata: {"labels": {"openstack-control-plane":"enabled","openvswitch":"enabled","role":"ceph-osd-node"}}
+ cmp_metadata: {"labels": {"openstack-compute-node":"enabled","openvswitch":"enabled", "role":"ceph-osd-node"}}
+ gtw_metadata: {"labels": {"openvswitch":"enabled", "gateway": "enabled"}}
+ cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
diff --git a/de/heat-templates/scripts/instance_boot.sh b/de/heat-templates/scripts/instance_boot.sh
new file mode 100644
index 0000000..a9ece06
--- /dev/null
+++ b/de/heat-templates/scripts/instance_boot.sh
@@ -0,0 +1,141 @@
+#!/bin/bash
+set -x
+
+DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL:-10.10.1.0/16}
+# DOCKER_DEFAULT_ADDRESS_SIZE has to be larger (i.e. a longer prefix) than the netmask in
+# DOCKER_DEFAULT_ADDRESS_POOL, because the actual netmask of docker_gwbridge is taken from it
+DOCKER_DEFAULT_ADDRESS_SIZE=${DOCKER_DEFAULT_ADDRESS_SIZE:-24}
+HOST_INTERFACE=${HOST_INTERFACE:-ens3}
+UCP_USERNAME=${UCP_USERNAME:-admin}
+UCP_PASSWORD=${UCP_PASSWORD:-administrator}
+OS_CODENAME=$(lsb_release -c -s)
+
+NODE_TYPE=$node_type
+UCP_MASTER_HOST=$ucp_master_host
+
+function wait_condition_send {
+ local status=${1:-SUCCESS}
+ local reason=${2:-empty}
+ local data_binary="{\"status\": \"$status\", \"reason\": \"$reason\"}"
+ echo "Trying to send signal to wait condition 5 times: $data_binary"
+ WAIT_CONDITION_NOTIFY_EXIT_CODE=2
+ i=0
+ while (( ${WAIT_CONDITION_NOTIFY_EXIT_CODE} != 0 && ${i} < 5 )); do
+ $wait_condition_notify -k --data-binary "$data_binary" && WAIT_CONDITION_NOTIFY_EXIT_CODE=0 || WAIT_CONDITION_NOTIFY_EXIT_CODE=2
+ i=$((i + 1))
+ sleep 1
+ done
+ if [[ ${WAIT_CONDITION_NOTIFY_EXIT_CODE} -ne 0 && "${status}" == "SUCCESS" ]]
+ then
+ status="FAILURE"
+ reason="Cannot reach the metadata service to report SUCCESS."
+ fi
+ if [ "$status" == "FAILURE" ]; then
+ exit 1
+ fi
+}
+
+function install_docker_ce {
+ apt-get install -y apt-transport-https ca-certificates curl software-properties-common
+ curl --retry 6 --retry-delay 5 -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+ add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu ${OS_CODENAME} stable"
+ apt-get update
+ apt-get install -y docker-ce jq unzip
+}
+
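+# Write /etc/docker/daemon.json before docker-ce is installed so the daemon
+# uses the custom default address pool from its first start.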
+function update_docker_network {
+ mkdir -p /etc/docker
+ cat <<EOF > /etc/docker/daemon.json
+{
+ "default-address-pools": [
+ { "base": "${DOCKER_DEFAULT_ADDRESS_POOL}", "size": ${DOCKER_DEFAULT_ADDRESS_SIZE} }
+ ]
+}
+EOF
+
+}
+
+function install_ucp {
+ local tmpd
+ tmpd=$(mktemp -d)
+ cat <<EOF > ${tmpd}/docker_subscription.lic
+$ucp_license_key
+EOF
+
+ node_ip_address=$(ip addr show dev ${HOST_INTERFACE} |grep -Po 'inet \K[\d.]+' |egrep -v "127.0.|172.17")
+ docker container run --rm --name ucp \
+ -v /var/run/docker.sock:/var/run/docker.sock \
+ -v $tmpd/docker_subscription.lic:/config/docker_subscription.lic \
+ docker/ucp:3.2.4 install \
+ --host-address $node_ip_address \
+ --admin-username $UCP_USERNAME \
+ --admin-password $UCP_PASSWORD \
+ --existing-config
+}
+
+function download_bundles {
+ local tmpd
+ tmpd=$(mktemp -d)
+ # Download the bundle https://docs.docker.com/ee/ucp/user-access/cli/
+ # Create an environment variable with the user security token
+ AUTHTOKEN=$(curl -sk -d '{"username":"'$UCP_USERNAME'","password":"'$UCP_PASSWORD'"}' https://${UCP_MASTER_HOST}/auth/login | jq -r .auth_token)
+
+ # Download the client certificate bundle
+ curl -k -H "Authorization: Bearer $AUTHTOKEN" https://${UCP_MASTER_HOST}/api/clientbundle -o ${tmpd}/bundle.zip
+
+ pushd $tmpd
+ # Unzip the bundle.
+ unzip bundle.zip
+
+ # Run the utility script.
+ eval "$(<env.sh)"
+ popd
+}
+
+function join_node {
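+ # Ask the swarm manager (reachable through the client bundle environment set up
+ # by download_bundles) for the join command of the given role (manager|worker),
+ # then run it with a clean environment so it targets the local docker daemon.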
+ env -i $(docker swarm join-token $1 |grep 'docker swarm join' | xargs)
+}
+
+function create_ucp_config {
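+ # Pre-create the UCP configuration so that "ucp install --existing-config" picks
+ # it up: admin scheduling enabled and Kubernetes as the default orchestrator.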
+ echo "[scheduling_configuration]
+ enable_admin_ucp_scheduling = true
+ default_node_orchestrator = \"kubernetes\"" | docker config create com.docker.ucp.config -
+}
+
+function swarm_init {
+ docker swarm init --advertise-addr ${HOST_INTERFACE}
+}
+
+function rm_ucp_config {
+ docker config rm com.docker.ucp.config
+}
+
+
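+# $node_type is substituted by Heat: "ucp" bootstraps the swarm and installs UCP,
+# while "master" and "worker" join the existing cluster as manager and worker nodes.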
+case "$NODE_TYPE" in
+ ucp)
+ update_docker_network
+ install_docker_ce
+ swarm_init
+ create_ucp_config
+ install_ucp
+ rm_ucp_config
+ ;;
+ master)
+ update_docker_network
+ install_docker_ce
+ download_bundles
+ join_node manager
+ ;;
+ worker)
+ update_docker_network
+ install_docker_ce
+ download_bundles
+ join_node worker
+ ;;
+ *)
+ echo "Usage: $0 {ucp|master|worker}"
+ exit 1
+esac
+
+
+wait_condition_send "SUCCESS" "Instance successfuly started."
diff --git a/de/heat-templates/scripts/license.lic b/de/heat-templates/scripts/license.lic
new file mode 100644
index 0000000..a923a91
--- /dev/null
+++ b/de/heat-templates/scripts/license.lic
@@ -0,0 +1 @@
+{"key_id":"lm_DfgeIFfNITZK-oUibhI4poRik1cMA-OFmuofxyeAz","private_key":"tExKBK6pHfFdWoYAeqBbRKQuV8UTCG4jRh7j9WyW43ih","authorization":"ewogICAicGF5bG9hZCI6ICJleUpsZUhCcGNtRjBhVzl1SWpvaU1qQXlNQzB3TVMweE9GUXhORG93T1RvMU9Gb2lMQ0owYjJ0bGJpSTZJa3BGVUU5MVIzVnRObmROYjNRM056VnRVbEpwZEdGZlZXTndaakZHZVRWb2RYWnRhMU5aUkZJMldFRTlJaXdpYldGNFJXNW5hVzVsY3lJNk1UQXNJbk5qWVc1dWFXNW5SVzVoWW14bFpDSTZkSEoxWlN3aWJHbGpaVzV6WlZSNWNHVWlPaUpQYm14cGJtVWlMQ0owYVdWeUlqb2lWSEpwWVd3aUxDSnpkV0p6WTNKcGNIUnBiMjVmYVdRaU9pSnpkV0l0WlRFMVl6ZGhOell0WldKaU1pMDBZekF4TFRobE9HWXRaakkyTUdKak5ETTJORE5sSWl3aWNISnZaSFZqZEY5cFpDSTZJbVJ2WTJ0bGNpMWxaUzF6WlhKMlpYSXRkV0oxYm5SMUlpd2ljbUYwWlY5d2JHRnVYMmxrSWpvaVpHOWphMlZ5TFdWbExYTmxjblpsY2kxMVluVnVkSFV0ZEhKcFlXd3RkR2xsY2lJc0luWmxjbk5wYjI0aU9qRXNJbWR5WVdObFgyUmhlWE1pT2pFc0ltMWxkR0ZrWVhSaElqcDdJblZ6WlhKdVlXMWxJam9pYW5WdGNHOXFiM2tpTENKamIyMXdZVzU1SWpvaWVHRjBZU0o5TENKd2NtbGphVzVuWDJOdmJYQnZibVZ1ZEhNaU9sdDdJbTVoYldVaU9pSk9iMlJsY3lJc0luWmhiSFZsSWpveE1IMWRmUSIsCiAgICJzaWduYXR1cmVzIjogWwogICAgICB7CiAgICAgICAgICJoZWFkZXIiOiB7CiAgICAgICAgICAgICJqd2siOiB7CiAgICAgICAgICAgICAgICJlIjogIkFRQUIiLAogICAgICAgICAgICAgICAia2V5SUQiOiAiSjdMRDo2N1ZSOkw1SFo6VTdCQToyTzRHOjRBTDM6T0YyTjpKSEdCOkVGVEg6NUNWUTpNRkVPOkFFSVQiLAogICAgICAgICAgICAgICAia2lkIjogIko3TEQ6NjdWUjpMNUhaOlU3QkE6Mk80Rzo0QUwzOk9GMk46SkhHQjpFRlRIOjVDVlE6TUZFTzpBRUlUIiwKICAgICAgICAgICAgICAgImt0eSI6ICJSU0EiLAogICAgICAgICAgICAgICAibiI6ICJ5ZEl5LWxVN283UGNlWS00LXMtQ1E1T0VnQ3lGOEN4SWNRSVd1Szg0cElpWmNpWTY3MzB5Q1lud0xTS1Rsdy1VNlVDX1FSZVdSaW9NTk5FNURzNVRZRVhiR0c2b2xtMnFkV2JCd2NDZy0yVVVIX09jQjlXdVA2Z1JQSHBNRk1zeER6V3d2YXk4SlV1SGdZVUxVcG0xSXYtbXE3bHA1blFfUnhyVDBLWlJBUVRZTEVNRWZHd20zaE1PX2dlTFBTLWhnS1B0SUhsa2c2X1djb3hUR29LUDc5ZF93YUhZeEdObDdXaFNuZWlCU3hicGJRQUtrMjFsZzc5OFhiN3ZaeUVBVERNclJSOU1lRTZBZGo1SEpwWTNDb3lSQVBDbWFLR1JDSzR1b1pTb0l1MGhGVmxLVVB5YmJ3MDAwR08td2EyS044VXdnSUltMGk1STF1VzlHa3E0empCeTV6aGdxdVVYYkc5YldQQU9ZcnE1UWE4MUR4R2NCbEp5SFlBcC1ERFBFOVRHZzR6WW1YakpueFpxSEVkdUdxZGV2WjhYTUkwdWtma0dJSTE0d1VPaU1JSUlyWGxFY0JmXzQ2SThnUVdEenh5Y1plX0pHWC1MQXVheVhyeXJVRmVoVk5VZFpVbDl3WE5hSkIta2FDcXo1UXdhUjkzc0d3LVFTZnREME52TGU3Q3lPSC1FNnZnNlN0X05lVHZndjhZbmhDaVhJbFo4SE9mSXdOZTd0RUZfVWN6NU9iUHlrbTN0eWxyTlVqdDBWeUFtdHRhY1ZJMmlHaWhjVVBybWs0bFZJWjdWRF9MU1ctaTd5b1N1cnRwc1BYY2UycEtESW8zMGxKR2hPXzNLVW1sMlNVWkNxekoxeUVtS3B5c0g1SERXOWNzSUZDQTNkZUFqZlpVdk43VSIKICAgICAgICAgICAgfSwKICAgICAgICAgICAgImFsZyI6ICJSUzI1NiIKICAgICAgICAgfSwKICAgICAgICAgInNpZ25hdHVyZSI6ICJLOTdEcC1yMTlPV1k0c0taZ0c0SGdGWGk0ZHJZX056a3hzeDJIbGY0NXJ5a0tWU3VOWWtrblYyaFMwdlY3ODB3anM5M1B3cTJLR2l2dkVteG5vc05qeWVodnJxYktDVFBUWEE5S1REQnNIT052QlEwT3gzSFpPdWRCXzF6Y2xSY3NIX3oyT1I0RFV5NFBXNGhHNUVSa055WnJHbE5FSFBXR2hMT1RVckU4OEIxRjc4eFAtU2RncGxtb3dkVVdaUEhPdzJxZUlxQ1NNUHZ2cHJ3OGVDSE1vYnJvNnZ2d00wTThxdkMxMkI1Tk5JUkMwTXVrbFhYdnVuN3pQTnU0c0dId2xfUDlfaU1VMkVCajlhOTBYRkJRWklQVFJlVG9JbmdPT2xuNExRTk9DdzNsYXZzRjhrbWRxZkdwSWRhNHFINWhYSmx6eEVWdlZEelNFUDlTbWY2QVFaUlNHT0h1TkZ0THZYRTB5TlBYOXNzV3B2Zy1GM0VZUUl2S0ZXdDBUbG9lS3lhdllnZEc4bFQ5VE8xdEZYdEJrdEdFb3hYa0FKb3BKWGJpc3pTdzJnd2c2dGJ6N0J6bFdQbWZzelBHUk94UTY3ZjFrQ3VGOXhKQ0RTcFhJb1BzM1FIWWJZMUVQMkJNUzRqNFVHQVVTaV8yanJQajhmZEpBaWFIeF9LWUZaSGMtMmZKd2R5VUQ4eWhmWk02UXlMRF95cDZyeGNiYkpuU1Z4TVlwRjc5SGxRR0d2UzBSN3Y5VUl2czhDRW9NLXBDeEJrc1NabjZtVVpnMTRjdGZLYVQ4cEtYd3NGRnFBZmthandxWDk4WjI5T1dtZTJlNmoxRWRXZWtOSVU5dzA0THlYR0lITmk5cHlpbzlVX2V5bnFFbkZNbjZ1aW5mcDVMa3U2TnZHUXgzQSIsCiAgICAgICAgICJwcm90ZWN0ZWQiOiAiZXlKbWIzSnRZWFJNWlc1bmRHZ2lPalExTUN3aVptOXliV0YwVkdGcGJDSTZJbVpSSWl3aWRHbHRaU0k2SWpJd01Ua3RNVEl0TVRkVU1UVTZORFU2TkROYUluMCIKICAgICAgfQogICBdCn0="}
\ No newline at end of file
diff --git a/de/heat-templates/srv-group.yaml b/de/heat-templates/srv-group.yaml
new file mode 100644
index 0000000..e654bb2
--- /dev/null
+++ b/de/heat-templates/srv-group.yaml
@@ -0,0 +1,68 @@
+heat_template_version: queens
+
+parameters:
+ key_name:
+ type: string
+ description: Name of keypair to assign to servers
+ image:
+ type: string
+ description: Name of image to use for servers
+ flavor:
+ type: string
+ description: Flavor to use for servers
+ public_net_id:
+ type: string
+ description: >
+ ID of public network for which floating IP addresses will be allocated
+ private_net_id:
+ type: string
+ description: ID of private network into which servers get deployed
+ private_subnet_id:
+ type: string
+ description: ID of private subnet
+ private_floating_network:
+ type: string
+ description: ID of the network that will be used as the floating network in the nested OpenStack
+ user_data:
+ type: string
+ metadata:
+ type: json
+
+resources:
+
+ server:
+ type: OS::Nova::Server
+ properties:
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ key_name: { get_param: key_name }
+ availability_zone: nova
+ networks:
+ - port: { get_resource: server_port }
+ - network: { get_param: private_floating_network }
+ user_data_format: RAW
+ user_data: { get_param: user_data }
+ metadata: { get_param: metadata }
+
+ server_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: private_net_id }
+ port_security_enabled: false
+ fixed_ips:
+ - subnet: { get_param: private_subnet_id }
+
+ server_floating_ip:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network_id: { get_param: public_net_id }
+ port_id: { get_resource: server_port }
+
+
+outputs:
+ server_private_ip:
+ description: IP address of server in private network
+ value: { get_attr: [ server_port, fixed_ips, 0, ip_address] }
+ server_public_ip:
+ description: Floating IP address of server in public network
+ value: { get_attr: [ server_floating_ip, floating_ip_address ] }
diff --git a/de/heat-templates/top.yaml b/de/heat-templates/top.yaml
new file mode 100644
index 0000000..2d3f64a
--- /dev/null
+++ b/de/heat-templates/top.yaml
@@ -0,0 +1,236 @@
+heat_template_version: queens
+
+parameters:
+ key_name:
+ type: string
+ description: Name of keypair to assign to servers
+ image:
+ type: string
+ description: Name of image to use for servers
+ flavor:
+ type: string
+ description: Flavor to use for servers
+ public_net_id:
+ type: string
+ description: >
+ ID of public network for which floating IP addresses will be allocated
+ masters_size:
+ type: number
+ description: Number of masters instances to deploy
+ default: 2
+ worker_size:
+ type: number
+ description: Number of workers to deploy
+ default: 5
+ cmp_size:
+ type: number
+ description: Number of cmp workers to deploy
+ default: 0
+ gtw_size:
+ type: number
+ description: Number of gtw workers to deploy
+ default: 0
+ ucp_boot_timeout:
+ type: number
+ description: Boot timeout for UCP instance
+ default: 1200
+ cluster_public_key:
+ type: string
+ worker_metadata:
+ type: json
+ cmp_metadata:
+ type: json
+ gtw_metadata:
+ type: json
+
+resources:
+ key_pair:
+ type: OS::Nova::KeyPair
+ properties:
+ name: { get_param: "OS::stack_name" }
+ public_key: { get_param: cluster_public_key}
+ save_private_key: false
+
+ network:
+ type: OS::Neutron::Net
+ subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network: { get_resource: network }
+ cidr: 10.10.0.0/24
+ dns_nameservers:
+ - 172.18.208.44
+ - 4.2.2.1
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: public_net_id }
+ router_iface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router: { get_resource: router }
+ subnet: { get_resource: subnet }
+
+ private_floating_network:
+ type: OS::Neutron::Net
+ properties:
+ port_security_enabled: false
+ private_floating_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network: { get_resource: private_floating_network }
+ cidr: 10.11.12.0/24
+ enable_dhcp: false
+ gateway_ip: ~
+
+ ucp_config:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: ungrouped
+ config:
+ str_replace:
+ template: { get_file: ./scripts/instance_boot.sh }
+ params:
+ $node_type: ucp
+ $wait_condition_notify: { get_attr: [ ucp_wait_handle, curl_cli ] }
+ $ucp_license_key: { get_file: ./scripts/license.lic }
+ ucp:
+ depends_on: router_iface
+ type: ./srv-group.yaml
+ properties:
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ key_name: { get_param: "OS::stack_name" }
+ public_net_id: { get_param: public_net_id }
+ private_net_id: { get_resource: network }
+ private_subnet_id: { get_resource: subnet }
+ private_floating_network: { get_resource: private_floating_network }
+ user_data: { get_resource: ucp_config }
+ metadata: {"role":"ucp"}
+
+ ucp_wait_handle:
+ type: OS::Heat::WaitConditionHandle
+ ucp_wait_condition:
+ type: OS::Heat::WaitCondition
+ properties:
+ handle: { get_resource: ucp_wait_handle }
+ timeout: { get_param: ucp_boot_timeout }
+
+ master_config:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: ungrouped
+ config:
+ str_replace:
+ template: { get_file: ./scripts/instance_boot.sh }
+ params:
+ $node_type: master
+ $wait_condition_notify: { get_attr: [ ucp_wait_handle, curl_cli ] }
+ $ucp_license_key: { get_file: ./scripts/license.lic }
+ $ucp_master_host: { get_attr: [ucp, server_private_ip] }
+
+ masters:
+ type: OS::Heat::ResourceGroup
+ depends_on:
+ - ucp_wait_condition
+ properties:
+ count: { get_param: masters_size }
+ resource_def:
+ type: ./srv-group.yaml
+ properties:
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ key_name: { get_param: "OS::stack_name" }
+ public_net_id: { get_param: public_net_id }
+ private_net_id: { get_resource: network }
+ private_subnet_id: { get_resource: subnet }
+ private_floating_network: { get_resource: private_floating_network }
+ user_data: { get_resource: master_config }
+ metadata: {"role":"master"}
+
+ worker_config:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: ungrouped
+ config:
+ str_replace:
+ template: { get_file: ./scripts/instance_boot.sh }
+ params:
+ $node_type: worker
+ $wait_condition_notify: { get_attr: [ ucp_wait_handle, curl_cli ] }
+ $ucp_license_key: { get_file: ./scripts/license.lic }
+ $ucp_master_host: { get_attr: [ucp, server_private_ip] }
+
+ workers:
+ type: OS::Heat::ResourceGroup
+ depends_on:
+ - ucp_wait_condition
+ properties:
+ count: { get_param: worker_size }
+ resource_def:
+ type: ./srv-group.yaml
+ properties:
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ key_name: { get_param: "OS::stack_name" }
+ public_net_id: { get_param: public_net_id }
+ private_net_id: { get_resource: network }
+ private_subnet_id: { get_resource: subnet }
+ private_floating_network: { get_resource: private_floating_network }
+ user_data: { get_resource: worker_config }
+ metadata: { get_param: worker_metadata}
+ cmps:
+ type: OS::Heat::ResourceGroup
+ depends_on:
+ - ucp_wait_condition
+ properties:
+ count: { get_param: cmp_size }
+ resource_def:
+ type: ./srv-group.yaml
+ properties:
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ key_name: { get_param: "OS::stack_name" }
+ public_net_id: { get_param: public_net_id }
+ private_net_id: { get_resource: network }
+ private_subnet_id: { get_resource: subnet }
+ private_floating_network: { get_resource: private_floating_network }
+ user_data: { get_resource: worker_config }
+ metadata: { get_param: cmp_metadata}
+ gtws:
+ type: OS::Heat::ResourceGroup
+ depends_on:
+ - ucp_wait_condition
+ properties:
+ count: { get_param: gtw_size }
+ resource_def:
+ type: ./srv-group.yaml
+ properties:
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ key_name: { get_param: "OS::stack_name" }
+ public_net_id: { get_param: public_net_id }
+ private_net_id: { get_resource: network }
+ private_subnet_id: { get_resource: subnet }
+ private_floating_network: { get_resource: private_floating_network }
+ user_data: { get_resource: worker_config }
+ metadata: { get_param: gtw_metadata}
+
+
+outputs:
+ ucp_ips:
+ description: Floating IP address of the deployed ucp instance
+ value: { get_attr: [ucp, server_public_ip] }
+ masters_ips:
+ description: Floating IP addresses of the deployed master instances
+ value: { get_attr: [masters, server_public_ip] }
+ workers_ips:
+ description: Floating IP addresses of the deployed worker instances
+ value: { get_attr: [workers, server_public_ip] }
+ cmps_ips:
+ description: Floating IP addresses of the deployed cmp instances
+ value: { get_attr: [cmps, server_public_ip] }
+ gtws_ips:
+ description: Floating IP addresses of the deployed gtw instances
+ value: { get_attr: [gtws, server_public_ip] }