[TryMOS] Add Heat templates

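Add OpenStack Heat templates for an all-in-one TryMOS deployment: a
top-level stack, network and server fragments, an AIO environment file
and the instance boot script.

A minimal launch sketch, assuming the standard openstack CLI and that
the image and flavor referenced in env/aio.yaml exist in the target
cloud (the stack name "trymos-aio" is illustrative):

    openstack stack create -t trymos/heat-templates/top.yaml \
        -e trymos/heat-templates/env/aio.yaml trymos-aio
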
Related-Prod: PRODX-11817
Change-Id: Ib32bf0c95c6b309e36f6b2c7e6b2914aafa0b3ef
diff --git a/trymos/heat-templates/env/aio.yaml b/trymos/heat-templates/env/aio.yaml
new file mode 100644
index 0000000..21c5226
--- /dev/null
+++ b/trymos/heat-templates/env/aio.yaml
@@ -0,0 +1,30 @@
+resource_registry:
+  "MCP2::NetworkAcc": ../fragments/NetworkAccVM.yaml
+  "MCP2::SrvInstances": ../fragments/SrvInstancesVM.yaml
+
+parameters:
+  image: trymos-bionic-amd64-20210318-154934
+  public_net_id: public
+  cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
+  private_floating_network_cidr: '10.11.12.0/24'
+  private_floating_interface: ''
+  # compact.cid: RAM 32768 | Disk 100 | VCPU 8
+  ucp_flavor: 'mosk.aio.ephemeral'
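+  # Written to /usr/share/metadata/lab-metadata.yaml on the node
+  # (see fragments/SrvInstancesVM.yaml).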
+  hardware_metadata: |
+    '00:00:00:00:00:00':
+      write_files:
+        - path: /usr/share/metadata/ceph.yaml
+          content: |
+            storageDevices:
+              - name: vdb
+                role: hdd
+                sizeGb: 2
+            ramGb: 8
+            cores: 2
+            # The roles will be assigned based on node labels.
+            # roles:
+            #   - mon
+            #   - mgr
+            ips:
+              - 192.168.122.101
+            crushPath: {}
diff --git a/trymos/heat-templates/fragments/NetworkAccVM.yaml b/trymos/heat-templates/fragments/NetworkAccVM.yaml
new file mode 100644
index 0000000..66515d2
--- /dev/null
+++ b/trymos/heat-templates/fragments/NetworkAccVM.yaml
@@ -0,0 +1,38 @@
+heat_template_version: queens
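+# Control ("accessible") network fragment: a tenant network with a DHCP-enabled
+# subnet, routed to the external public network through a Neutron router.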
+
+parameters:
+  public_net_id:
+    type: string
+  control_network_cidr:
+    type: string
+
+resources:
+
+  network:
+    type: OS::Neutron::Net
+  subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network: { get_resource: network }
+      enable_dhcp: true
+      cidr: { get_param: control_network_cidr }
+      dns_nameservers:
+        - 172.18.224.6
+        - 172.18.176.6
+  router:
+    type: OS::Neutron::Router
+    properties:
+      external_gateway_info:
+        network: { get_param: public_net_id }
+  router_iface:
+    type: OS::Neutron::RouterInterface
+    properties:
+      router: { get_resource: router }
+      subnet: { get_resource: subnet }
+
+
+outputs:
+  public_network:
+    value: { get_resource: network }
+  accessible_subnet_id:
+    value: { get_resource: subnet }
diff --git a/trymos/heat-templates/fragments/SrvInstancesVM.yaml b/trymos/heat-templates/fragments/SrvInstancesVM.yaml
new file mode 100644
index 0000000..94c792b
--- /dev/null
+++ b/trymos/heat-templates/fragments/SrvInstancesVM.yaml
@@ -0,0 +1,135 @@
+heat_template_version: queens
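+# Server fragment: boots the TryMOS image with a cloud-init multipart payload
+# (bootstrap script, lab metadata, mount override), attaches a floating IP and
+# reports boot status through a Heat wait condition.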
+
+parameters:
+
+  metadata:
+    type: json
+    default: {}
+  key_name:
+    type: string
+    description: Name of keypair to assign to servers
+  image:
+    type: string
+    description: Name of image to use for servers
+  flavor:
+    type: string
+    description: Flavor to use for servers
+  accessible_network:
+    type: string
+  accessible_subnet_id:
+    type: string
+  control_network_cidr:
+    type: string
+  boot_timeout:
+    type: number
+    description: Boot timeout for instance
+    default: 3600
+  public_net_id:
+    type: string
+  docker_ee_release:
+    type: string
+  docker_ee_url:
+    type: string
+  docker_ucp_image:
+    type: string
+  docker_ucp_swarm_data_port:
+    type: string
+    default: 4789
+  docker_default_address_pool:
+    type: string
+  hardware_metadata:
+    description: The content of lab metadata.
+    default: ''
+    type: string
+  user_data_config:
+    description: Cloud-config fragment that prevents the drive labelled ephemeral0 from being mounted to /mnt
+    type: string
+    default: |
+      #cloud-config
+      #
+      # Don't mount ephemeral0 to /mnt, as cloud-init does by default
+      mounts:
+        - [ ephemeral0, null ]
+
+resources:
+
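+  # Renders scripts/instance_boot.sh, substituting the $-prefixed placeholders
+  # listed below into the script.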
+  software_config:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: ungrouped
+      config:
+        str_replace:
+          template: { get_file: ../scripts/instance_boot.sh }
+          params:
+            $wait_condition_notify: { get_attr: [ wait_handle, curl_cli ] }
+            $docker_ee_url: { get_param: docker_ee_url }
+            $docker_ee_release: { get_param: docker_ee_release }
+            $node_metadata: { get_param: metadata }
+            $control_network_cidr: { get_param: control_network_cidr }
+            $docker_ucp_image: { get_param: docker_ucp_image }
+            $docker_ucp_swarm_data_port: { get_param: docker_ucp_swarm_data_port }
+            $docker_default_address_pool: { get_param: docker_default_address_pool }
+
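+  # Writes the lab hardware metadata to /usr/share/metadata/lab-metadata.yaml
+  # on the instance.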
+  inject_files:
+    type: "OS::Heat::CloudConfig"
+    properties:
+      cloud_config:
+        write_files:
+          - path: /usr/share/metadata/lab-metadata.yaml
+            owner: "root:root"
+            permissions: "0644"
+            content: { get_param: hardware_metadata}
+
+  install_config_agent:
+    type: "OS::Heat::MultipartMime"
+    properties:
+      parts:
+      - config: {get_resource: software_config}
+      - config: {get_resource: inject_files}
+      - config: {get_param: user_data_config}
+
+  server:
+    type: OS::Nova::Server
+    properties:
+      image: { get_param: image }
+      flavor: { get_param: flavor }
+      key_name: { get_param: key_name }
+      availability_zone: nova
+      networks:
+        - port: { get_resource: accessible_server_port }
+      user_data_format: SOFTWARE_CONFIG
+      user_data: { get_resource: install_config_agent }
+      metadata: { get_param: metadata }
+
+  accessible_server_port:
+    type: OS::Neutron::Port
+    properties:
+      network_id: { get_param: accessible_network }
+      port_security_enabled: false
+      fixed_ips:
+        - subnet: { get_param: accessible_subnet_id }
+
+  server_floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network_id: { get_param: public_net_id }
+      port_id: { get_resource: accessible_server_port }
+
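+  # The boot script signals this handle (wait_condition_send in
+  # scripts/instance_boot.sh); the stack fails if no signal arrives within
+  # boot_timeout seconds.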
+  wait_handle:
+    type: OS::Heat::WaitConditionHandle
+  wait_condition:
+    type: OS::Heat::WaitCondition
+    properties:
+      handle: { get_resource: wait_handle }
+      timeout: { get_param: boot_timeout }
+
+outputs:
+  server_private_ip:
+    description: IP address of server in private network
+    value: { get_attr: [server, networks, { get_param: accessible_network}, 0]}
+  server_public_ip:
+    description: Floating IP address of server in public network
+    value: { get_attr: [ server_floating_ip, floating_ip_address ] }
+  wc_data:
+    description: Metadata from instance
+    value: { get_attr: [wait_condition, data]}
diff --git a/trymos/heat-templates/scripts/instance_boot.sh b/trymos/heat-templates/scripts/instance_boot.sh
new file mode 100644
index 0000000..5b3261c
--- /dev/null
+++ b/trymos/heat-templates/scripts/instance_boot.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+set -x
+
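+# The $-prefixed values below are placeholders filled in via the str_replace
+# params in fragments/SrvInstancesVM.yaml when this script is rendered.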
+export PUBLIC_INTERFACE=$private_floating_interface
+export NODE_METADATA=$node_metadata
+export DOCKER_EE_URL=$docker_ee_url
+export DOCKER_EE_RELEASE=$docker_ee_release
+export DOCKER_UCP_IMAGE=$docker_ucp_image
+export UCP_DOCKER_SWARM_DATA_PORT=$docker_ucp_swarm_data_port
+export DOCKER_DEFAULT_ADDRESS_POOL=$docker_default_address_pool
+
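+# Signal the Heat wait condition, retrying up to 5 times; exits non-zero when
+# the signal cannot be delivered or a FAILURE is being reported.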
+function wait_condition_send {
+    local status=${1:-SUCCESS}
+    local reason=${2:-\"empty\"}
+    local data=${3:-\"empty\"}
+    local data_binary="{\"status\": \"$status\", \"reason\": \"$reason\", \"data\": $data}"
+    echo "Trying to send signal to wait condition 5 times: $data_binary"
+    WAIT_CONDITION_NOTIFY_EXIT_CODE=2
+    i=0
+    while (( ${WAIT_CONDITION_NOTIFY_EXIT_CODE} != 0 && ${i} < 5 )); do
+        $wait_condition_notify -k --data-binary "$data_binary" && WAIT_CONDITION_NOTIFY_EXIT_CODE=0 || WAIT_CONDITION_NOTIFY_EXIT_CODE=2
+        i=$((i + 1))
+        sleep 1
+    done
+    if [[ ${WAIT_CONDITION_NOTIFY_EXIT_CODE} -ne 0 && "${status}" == "SUCCESS" ]]
+    then
+        status="FAILURE"
+        reason="Can't reach metadata service to report about SUCCESS."
+    fi
+    if [ "$status" == "FAILURE" ]; then
+        exit 1
+    fi
+}
+
+bash /srv/bin/bootstrap_trymos_aws.sh
+
+if [[ "$?" == "0" ]]; then
+    wait_condition_send "SUCCESS" "Deploying TryMOS successfuly ."
+else
+    wait_condition_send "FAILURE" "Deploying TryMOS failed."
+fi
diff --git a/trymos/heat-templates/top.yaml b/trymos/heat-templates/top.yaml
new file mode 100644
index 0000000..f8b330f
--- /dev/null
+++ b/trymos/heat-templates/top.yaml
@@ -0,0 +1,89 @@
+heat_template_version: queens
+
+parameters:
+  image:
+    type: string
+    description: Name of image to use for servers
+  cluster_public_key:
+    type: string
+  ucp_flavor:
+    type: string
+  public_net_id:
+    type: string
+    description: >
+      ID of the public network from which floating IP addresses are allocated;
+      for the baremetal case, the flat provisioning network for the nodes.
+  control_network_cidr:
+    type: string
+    description: CIDR of the control network, used to detect the control interface.
+    default: '10.10.0.0/24'
+  docker_default_address_pool:
+    type: string
+    description: Default address pool for Docker UCP-specific local networks
+    default: '10.10.1.0/16'
+  docker_ee_url:
+    type: string
+    default: 'https://storebits.docker.com/ubuntu'
+  docker_ee_release:
+    type: string
+    default: 'stable-19.03'
+  docker_ucp_image:
+    type: string
+    default: 'mirantis.azurecr.io/lcm/docker/ucp:3.3.6'
+  docker_ucp_swarm_data_port:
+    type: string
+    default: 4789
+  private_floating_interface:
+    description: Interface that carries the floating network for the child OpenStack cloud.
+    type: string
+  private_floating_network_cidr:
+    type: string
+    default: '10.11.12.0/24'
+  hardware_metadata:
+    description: The content of lab metadata.
+    default: ''
+    type: string
+
+resources:
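+  # Random hex string used as the name of the Nova keypair that holds
+  # cluster_public_key.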
+  keypair_name:
+    type: OS::Heat::RandomString
+    properties:
+      character_classes: [{"class": "hexdigits", "min": 1}]
+      length: 128
+      salt: constant
+  key_pair:
+    type: OS::Nova::KeyPair
+    properties:
+      name: { get_attr: [keypair_name, value] }
+      public_key: { get_param: cluster_public_key }
+      save_private_key: false
+
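+  # Control ("accessible") network for the node; MCP2::NetworkAcc is mapped to
+  # fragments/NetworkAccVM.yaml in env/aio.yaml.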
+  accessible_network:
+    type: MCP2::NetworkAcc
+    properties:
+      public_net_id: { get_param: public_net_id }
+      control_network_cidr: { get_param: control_network_cidr }
+
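+  # Single all-in-one UCP node; MCP2::SrvInstances is mapped to
+  # fragments/SrvInstancesVM.yaml in env/aio.yaml.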
+  ucp:
+    depends_on:
+     - accessible_network
+    type: MCP2::SrvInstances
+    properties:
+      docker_ee_url: { get_param: docker_ee_url }
+      docker_ee_release: { get_param: docker_ee_release }
+      docker_ucp_image: { get_param: docker_ucp_image}
+      docker_ucp_swarm_data_port: { get_param: docker_ucp_swarm_data_port }
+      docker_default_address_pool: { get_param: docker_default_address_pool }
+      key_name: { get_attr: [keypair_name, value] }
+      image: { get_param: image }
+      flavor: { get_param: ucp_flavor }
+      accessible_network: { get_attr: [accessible_network, public_network] }
+      accessible_subnet_id: { get_attr: [accessible_network, accessible_subnet_id]}
+      public_net_id: { get_param: public_net_id }
+      control_network_cidr: { get_param: control_network_cidr }
+      hardware_metadata: { get_param: hardware_metadata}
+
+outputs:
+  ucp_ips:
+    description: Floating (public) IP address of the deployed UCP instance
+    value: { get_attr: [ucp, server_public_ip] }