Add scripts to deploy MOS for tryMOS
The patch adds scripts and related files to deploy MOS on
AWS for the tryMOS use case.
Related-PRODX: PRODX-11818
Change-Id: I8e2d06473608a22ddb70a7ab9f3bea82ef91a77c
diff --git a/de/heat-templates/scripts/instance_boot.sh b/de/heat-templates/scripts/instance_boot.sh
index 5ff0c2e..e4405d0 100644
--- a/de/heat-templates/scripts/instance_boot.sh
+++ b/de/heat-templates/scripts/instance_boot.sh
@@ -197,8 +197,9 @@
fi
fi
function install_retry {
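+ # NOTE: the trymos bootstrap scripts use the yq v3 "yq w" syntax; this PPA is expected to ship a compatible yq.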
+ add-apt-repository ppa:rmescandon/yq -y
apt update
- export DEBIAN_FRONTEND=noninteractive; apt install -y $pkg_list
+ export DEBIAN_FRONTEND=noninteractive; apt install -y $pkg_list yq
}
retry 10 "Failed to install required packages" install_retry
}
diff --git a/trymos/README b/trymos/README
new file mode 100644
index 0000000..f9368d2
--- /dev/null
+++ b/trymos/README
@@ -0,0 +1,48 @@
+How to deploy TryMOS on AWS
+===========================
+
+1. Download tryMOS image from Mirantis repo
+ wget https://artifactory.mcp.mirantis.net/artifactory/binary-dev-kaas-local/trymos/bin/trymos-bionic-amd64-master-20210316183204.qcow2
+
+2. Convert QCOW2 image to RAW format
+ qemu-img convert -f qcow2 -O raw ./trymos-bionic-amd64-master-20210316183204.qcow2 ./trymos-bionic-amd64-master-20210316183204.raw
+
+3. Upload RAW image to S3 storage, where `trymos-raw` is the name of the bucket
+ aws s3 cp ./trymos-bionic-amd64-master-20210316183204.raw s3://trymos-raw
+
+4. Create a snapshot from the uploaded image
+ aws ec2 import-snapshot --disk-container file://containers.json
+
+containers.json contents:
+
+{
+ "Description": "TryMOS RAW",
+ "Format": "RAW",
+ "UserBucket": {
+ "S3Bucket": "trymos-raw",
+ "S3Key": "trymos-bionic-amd64-master-20210316183204.raw"
+ }
+}
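+
+   The import runs asynchronously; its progress can be checked with the task id
+   printed by the previous command:
+   aws ec2 describe-import-snapshot-tasks --import-task-ids <import-task-id>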
+
+5. Create an image from the snapshot in EC2 Service -> Elastic Block Store -> Snapshots -> Actions -> Create image
+   with a 30 GB root volume and an additional 20 GB volume (EBS volume type gp3)
+
+6. Launch an instance from the image via EC2 Service -> Images -> AMIs -> Launch, with a flavor of at least
+   16 CPUs and 30 GB RAM (c4.4xlarge).
+
+7. Connect to the instance as root via its external IP, using the key file specified when the instance was launched
+ ssh 13.59.177.99 -i ./trymos-pair.pem -l root
+
+8. Run the following commands to prepare the environment for deployment
+ sed -i 's/vdb/xvdb/g' /usr/share/metadata/ceph.yaml
+
+   and export the control network CIDR
+ root@ip-172-31-11-89:/srv/bin# ip route
+ default via 172.31.0.1 dev eth0 proto dhcp src 172.31.11.89 metric 100
+ 172.31.0.0/20 dev eth0 proto kernel scope link src 172.31.11.89
+ 172.31.0.1 dev eth0 proto dhcp scope link src 172.31.11.89 metric 100
+
+ root@ip-172-31-11-89:/srv/bin# export CONTROL_NETWORK_CIDR='172.31.0.0/20'
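+
+   As a one-liner (assuming the control network is the kernel-scope route on
+   the primary interface, as in the output above):
+   export CONTROL_NETWORK_CIDR=$(ip route | awk '/proto kernel/ {print $1; exit}')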
+
+9. Run the installation script
+ /srv/bin/bootstrap_trymos_aws.sh
diff --git a/trymos/image_build/Makefile b/trymos/image_build/Makefile
new file mode 100644
index 0000000..94e349a
--- /dev/null
+++ b/trymos/image_build/Makefile
@@ -0,0 +1,10 @@
+.PHONY: default all build cloud-config
+default: cloud-config build
+
+all: cloud-config build
+
+build:
+ ./build_image.sh
+
+cloud-config:
+ ./config_drive.sh
diff --git a/trymos/image_build/build_image.sh b/trymos/image_build/build_image.sh
new file mode 100755
index 0000000..e935332
--- /dev/null
+++ b/trymos/image_build/build_image.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+export IMAGE_BASE_URL="https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img"
+export IMAGE_MD5_URL="https://cloud-images.ubuntu.com/bionic/current/MD5SUMS"
+export VM_NAME="trymos-bionic-amd64.qcow2"
+
+export PACKER_LOG=1
+export PACKER_IMAGES_CACHE="./packer_images_cache"
+mkdir -p "${PACKER_IMAGES_CACHE}"
+
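+# Minimal sanity check (assumes the packer binary was fetched into this
+# directory beforehand, e.g. from https://www.packer.io/downloads):
+if [ ! -x ./packer ]; then
+    echo "ERROR: ./packer not found or not executable" >&2
+    exit 1
+fi
+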
+./packer build -on-error=ask template.json
+
diff --git a/trymos/image_build/config-drive/meta-data b/trymos/image_build/config-drive/meta-data
new file mode 100644
index 0000000..2a01804
--- /dev/null
+++ b/trymos/image_build/config-drive/meta-data
@@ -0,0 +1 @@
+hostname: tryMOS
diff --git a/trymos/image_build/config-drive/user-data b/trymos/image_build/config-drive/user-data
new file mode 100644
index 0000000..76cd1d4
--- /dev/null
+++ b/trymos/image_build/config-drive/user-data
@@ -0,0 +1,14 @@
+#cloud-config
+debug: True
+ssh_pwauth: True
+disable_root: false
+chpasswd:
+ list: |
+ root:r00tme
+# cloud-user:r00tme
+ expire: False
+output: {all: '| tee -a /var/log/cloud-init-output.log'}
+runcmd:
+ - sed -i'.orig' -e's/\#PermitRootLogin.*/PermitRootLogin yes/g' -e's/PasswordAuthentication.*/PasswordAuthentication yes/g' /etc/ssh/sshd_config
+ - service sshd restart
+ - sed -i'.orig' -e's/PasswordAuthentication yes/PasswordAuthentication no/g' /etc/ssh/sshd_config
diff --git a/trymos/image_build/config_drive.sh b/trymos/image_build/config_drive.sh
new file mode 100755
index 0000000..9596b21
--- /dev/null
+++ b/trymos/image_build/config_drive.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
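+# Build the cloud-init config drive (NoCloud datasource, volume label "cidata"); template.json attaches it to the build VM via -cdrom.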
+[ -f config-drive/cloudata.iso ] && rm -v config-drive/cloudata.iso
+mkisofs -o config-drive/cloudata.iso -V cidata -r -J --quiet config-drive
diff --git a/trymos/image_build/files/etc/cloud/cloud.cfg b/trymos/image_build/files/etc/cloud/cloud.cfg
new file mode 100644
index 0000000..94b618d
--- /dev/null
+++ b/trymos/image_build/files/etc/cloud/cloud.cfg
@@ -0,0 +1,76 @@
+# If this is set, 'root' will not be able to ssh in and they
+# will get a message to login instead as the above $user (ubuntu)
+disable_root: false
+
+# This will cause the set+update hostname module to not operate (if true)
+preserve_hostname: false
+apt_preserve_sources_list: true
+
+# Better to leave management of /etc/hosts to salt and others
+# manage_etc_hosts: localhost
+
+# The modules that run in the 'init' stage
+cloud_init_modules:
+ - migrator
+ - seed_random
+ - bootcmd
+ - write-files
+ - growpart
+ - resizefs
+ - set_hostname
+ - update_hostname
+ - update_etc_hosts
+ - ca-certs
+ - rsyslog
+ - users-groups
+ - ssh
+
+# The modules that run in the 'config' stage
+cloud_config_modules:
+# Emit the cloud config ready event
+# this can be used by upstart jobs for 'start on cloud-config'.
+ - emit_upstart
+ - disk_setup
+ - mounts
+ - ssh-import-id
+ - locale
+ - set-passwords
+ - grub-dpkg
+ - apt-pipelining
+ - apt-configure
+ - package-update-upgrade-install
+ - landscape
+ - timezone
+ - puppet
+ - chef
+ - salt-minion
+ - mcollective
+ - disable-ec2-metadata
+ - runcmd
+ - byobu
+
+# The modules that run in the 'final' stage
+cloud_final_modules:
+ - rightscale_userdata
+ - scripts-vendor
+ - scripts-per-once
+ - scripts-per-boot
+ - scripts-per-instance
+ - scripts-user
+ - ssh-authkey-fingerprints
+ - keys-to-console
+ - phone-home
+ - final-message
+ - power-state-change
+
+datasource_list: [ NoCloud, ConfigDrive, Ec2, OpenStack, OVF, MAAS, None ]
+datasource:
+ Ec2:
+ timeout: 5 # (defaults to 50 seconds)
+ max_wait: 10 # (defaults to 120 seconds)
+ OpenStack:
+ timeout: 5 # (defaults to 50 seconds)
+ max_wait: 10 # (defaults to 120 seconds)
+ MAAS:
+ timeout: 5 # (defaults to 50 seconds)
+ max_wait: 10 # (defaults to 120 seconds)
diff --git a/trymos/image_build/files/etc/cloud/cloud.cfg.d/99_tcp.cfg b/trymos/image_build/files/etc/cloud/cloud.cfg.d/99_tcp.cfg
new file mode 100644
index 0000000..9bb8b42
--- /dev/null
+++ b/trymos/image_build/files/etc/cloud/cloud.cfg.d/99_tcp.cfg
@@ -0,0 +1,29 @@
+datasource_list: [ NoCloud, ConfigDrive, Ec2, OpenStack, OVF, MAAS, None ]
+datasource:
+ Ec2:
+ timeout: 5 # (defaults to 50 seconds)
+ max_wait: 10 # (defaults to 120 seconds)
+ strict_id: false
+ OpenStack:
+ timeout: 5 # (defaults to 50 seconds)
+ max_wait: 10 # (defaults to 120 seconds)
+ MAAS:
+ timeout: 5 # (defaults to 50 seconds)
+ max_wait: 10 # (defaults to 120 seconds)
+
+warnings:
+ dsid_missing_source: off
+
+system_info:
+ # This will affect which distro class gets used
+ distro: ubuntu
+ # Other config here will be given to the distro class and/or path classes
+ paths:
+ cloud_dir: /var/lib/cloud/
+ templates_dir: /etc/cloud/templates/
+ upstart_dir: /etc/init/
+ ssh_svcname: ssh
+
+growpart:
+ mode: auto
+ devices: ['/']
diff --git a/trymos/image_build/files/srv/bin/bootstrap_trymos_aws.sh b/trymos/image_build/files/srv/bin/bootstrap_trymos_aws.sh
new file mode 100755
index 0000000..659551e
--- /dev/null
+++ b/trymos/image_build/files/srv/bin/bootstrap_trymos_aws.sh
@@ -0,0 +1,304 @@
+#!/bin/bash
+# Redirect all outputs
+exec > >(tee -i /tmp/bootstrap-trymos-output.log) 2>&1
+
+set -x
+# allow access to the local variables from prepare-metadata.py
+set -a
+
+export NODE_TYPE='trymos'
+export NODE_METADATA='{"labels": {"local-volume-provisioner": "enabled", "openstack-compute-node": "enabled", "openstack-control-plane": "enabled", "openstack-gateway": "enabled", "openvswitch": "enabled", "role": "ceph-osd-node"}}'
+
+
+export STORAGE_CLUSTER_CIDR=${CONTROL_NETWORK_CIDR}
+export STORAGE_PUBLIC_CIDR=${CONTROL_NETWORK_CIDR}
+export NTP_SERVERS=${NTP_SERVERS:-169.254.169.123}
+
+#TryMOS variables
+export DOCKER_UCP_VERSION=3.3.3
+export DOCKER_UCP_IMAGE=${DOCKER_UCP_IMAGE:-mirantis.azurecr.io/lcm/docker/ucp:${DOCKER_UCP_VERSION}}
+export RELEASE_OPENSTACK_K8S=${RELEASE_OPENSTACK_K8S:-/srv/release-openstack-k8s}
+export CERT_REPO_DIR=${CERT_REPO_DIR:-/srv/certs}
+export CSR_CONF="${CERT_REPO_DIR}/api-csr.yaml"
+export SSL_BARE_NAME='api'
+export MIRANTIS_ARTIFACTORY_URL=${MIRANTIS_ARTIFACTORY_URL:-https://binary.mirantis.com}
+export CEPH_CLUSTER_FILE="${RELEASE_OPENSTACK_K8S}/examples/miraceph/aio-ceph_local_drives_openstack.yaml"
+export OPENSTACK_CONTEXT_NAME=${OPENSTACK_CONTEXT_NAME:-"aio-core-ceph-local-non-dvr"}
+export TUNNEL_INTERFACE=${TUNNEL_INTERFACE:-${DEFAULT_INTERFACE}}
+export LIVE_MIGRATION_INTERFACE=${LIVE_MIGRATION_INTERFACE:-${DEFAULT_INTERFACE}}
+
+_DEPLOY_FINISHED_LOCK="/var/tmp/trymos-deploy-finished.lock"
+
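+# Shared helpers (retry, install_docker, install_ucp, ...) come from instance_boot.sh, which template.json installs as /usr/share/trymos/functions.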
+source /usr/share/trymos/functions
+
+if [[ -f ${_DEPLOY_FINISHED_LOCK} ]]; then
+    echo "A TryMOS deploy has already been performed on this node."
+    echo "If you want to redeploy, please delete the lock file"
+ echo "${_DEPLOY_FINISHED_LOCK}"
+ exit 1
+fi
+
+function info {
+ local msg="[INFO]: $1"
+ echo "$msg"
+}
+
+
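+# Create a dummy "public" interface (ens4) via systemd-networkd: the AWS instance has a single NIC, so the OpenStack public network is backed by a virtual one.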
+function configure_virt_public_iface {
+
+cat << EOF > /etc/systemd/network/20-public-int.netdev
+[NetDev]
+Name=ens4
+Kind=dummy
+
+[Match]
+Name=ens4
+EOF
+
+cat << EOF > /etc/systemd/network/20-public-int.network
+[Match]
+Name=ens4
+
+[Network]
+Address=10.11.12.23/24
+Broadcast=10.11.12.255
+EOF
+
+ systemctl restart systemd-networkd
+ sleep 15
+
+}
+
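+# Rework netplan so the public interface's address moves onto a br-public bridge (paired with a veth); OpenStack can plug into the bridge while the host keeps connectivity.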
+function network_config {
+ PUBLIC_NODE_IP_ADDRESS=${PUBLIC_INTERFACE_IP:-$(ip addr show dev ${PUBLIC_INTERFACE} | grep -Po 'inet \K[\d.]+' | egrep -v "127.0.|172.17")}
+ PUBLIC_NODE_IP_NETMASK=${PUBLIC_INTERFACE_NETMASK:-$(ip addr show dev ${PUBLIC_INTERFACE} | grep -Po 'inet \K[\d.]+\/[\d]+' | egrep -v "127.0.|172.17" | cut -d'/' -f2)}
+
+ local public_interface=${1:-${PUBLIC_INTERFACE}}
+ local cloud_netplan_cfg="/etc/netplan/50-cloud-init.yaml"
+ local match_ip_line
+
+ DEBIAN_FRONTEND=noninteractive apt -y install bridge-utils
+
+cat << EOF > /etc/systemd/network/10-veth-phy-br.netdev
+[NetDev]
+Name=veth-phy
+Kind=veth
+[Peer]
+Name=veth-br
+EOF
+
+ sed -i 's/.*ethernets:.*/&\n veth-phy: {}/' ${cloud_netplan_cfg}
+ sed -i 's/.*ethernets:.*/&\n veth-br: {}/' ${cloud_netplan_cfg}
+    # NOTE(ohryhorov): this sed has to be disabled if PUBLIC_INTERFACE is
+    # already defined by cloud-init.
+ sed -i "s/.*ethernets:.*/&\n ${PUBLIC_INTERFACE}: {}/" ${cloud_netplan_cfg}
+ sed -i "s/${DEFAULT_INTERFACE}:/&\n critical: true/" ${cloud_netplan_cfg}
+
+ public_address_match_ip_line=$(grep -nm1 "${PUBLIC_NODE_IP_ADDRESS}/${PUBLIC_NODE_IP_NETMASK}" ${cloud_netplan_cfg} | cut -d: -f1)
+ sed -i "$((${public_address_match_ip_line}-1)),$((${public_address_match_ip_line}))d" ${cloud_netplan_cfg}
+
+cat << EOF >> ${cloud_netplan_cfg}
+ bridges:
+ br-public:
+ dhcp4: false
+ interfaces:
+ - ${PUBLIC_INTERFACE}
+ - veth-br
+ addresses:
+ - ${PUBLIC_NODE_IP_ADDRESS}/${PUBLIC_NODE_IP_NETMASK}
+EOF
+
+# Remove Tunnel interface from netplan
+if [[ $TUNNEL_INTERFACE_NETPLAN_MANAGE == false ]]; then
+ sed -i "/ ${TUNNEL_INTERFACE}/,/ set-name: ${TUNNEL_INTERFACE}/d" ${cloud_netplan_cfg}
+fi
+
+ netplan --debug apply
+
+ # NOTE(vsaienko): give some time to apply changes
+ sleep 15
+
+# Remove Tunnel interface from netplan
+if [[ $TUNNEL_INTERFACE_NETPLAN_MANAGE == false ]]; then
+ ip addr flush ${TUNNEL_INTERFACE}
+ ip link set ${TUNNEL_INTERFACE} up
+fi
+
+}
+
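+# Expose this node's Ceph disk layout (base64-encoded ceph.yaml) as HW_METADATA and zap the OSD drive so Ceph can claim it.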
+function collect_ceph_metadata {
+ local ceph_osd_node
+ ceph_osd_node=$(kubectl get nodes -l role=ceph-osd-node -o jsonpath={.items[?\(@.metadata.name==\"$(hostname)\"\)].metadata.name})
+
+ if [[ -f /usr/share/metadata/ceph.yaml && ${ceph_osd_node} ]]; then
+ HW_METADATA="{\"ceph\": {\"$(hostname)\": \"$(base64 -w 0 /usr/share/metadata/ceph.yaml)\"}}"
+ ceph_store_drive=$(cat /usr/share/metadata/ceph.yaml | egrep '\- name\: ' | awk '{print $3}')
+ if [[ -b /dev/${ceph_store_drive} ]]; then
+ sgdisk --zap-all /dev/${ceph_store_drive}
+ fi
+ fi
+}
+
+
+function install_release_controllers {
+
+    for release_object in ${RELEASE_OPENSTACK_K8S}/release/*.yaml; do
+ info "Installing ${release_object}"
+ function apply_retry {
+ kubectl apply -f ${release_object}
+ }
+ retry 3 "Can't apply ${release_object}" apply_retry
+# sleep 5
+ done
+}
+
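+# Deploy coredns, discover the cluster domain, and use cfssl to issue a wildcard certificate for *.openstack.svc.<domain>.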
+function prepare_dns_and_ssl {
+
+ local tmpd
+ tmpd=$(mktemp -d)
+
+ info "Preparing DNS and SSL configuration."
+
+ kubectl apply -f ${RELEASE_OPENSTACK_K8S}/release/ci/30-coredns.yaml
+ sleep 300
+ EXTERNAL_DNS_IP=$(kubectl -n coredns get services coredns-coredns -o jsonpath='{.status.loadBalancer.ingress[].ip}')
+    if [ -z "${EXTERNAL_DNS_IP}" ]; then
+ EXTERNAL_DNS_IP=$(kubectl -n coredns get services coredns-coredns -o jsonpath='{.spec.clusterIP}')
+ fi
+
+ LOCAL_DOMAIN=$(kubectl get configmap -n kube-system coredns -o jsonpath='{.data.Corefile}' | awk '/in-addr.arpa.*{/ {print $2}')
+
+ pushd $tmpd
+ info "Generating SSL certificates."
+
+ for ssl_app in cfssl cfssljson; do
+ curl --retry 5 --retry-delay 10 -L ${MIRANTIS_ARTIFACTORY_URL}/openstack/bin/utils/cfssl/${ssl_app} -o ${tmpd}/${ssl_app} && chmod +x ${tmpd}/${ssl_app}
+ done
+
+ # Generate SSL certs
+ yq w ${CSR_CONF} "CN" "*.openstack.svc.${LOCAL_DOMAIN}" | \
+ yq w -j - "hosts[+]" "*.openstack.svc.${LOCAL_DOMAIN}" | \
+ ${tmpd}/cfssl gencert -ca=${CERT_REPO_DIR}/ca.crt -ca-key=${CERT_REPO_DIR}/ca.key - | ${tmpd}/cfssljson -bare ${SSL_BARE_NAME}
+ mv ${tmpd}/${SSL_BARE_NAME}* ${CERT_REPO_DIR}
+ popd
+}
+
+function install_3rd_party {
+
+ info "Installing metallb."
+ kubectl apply -f ${RELEASE_OPENSTACK_K8S}/release/3rd-party/30-metallb.yaml
+    # NOTE(vsaienko): until PRODX-7154 is resolved, update calico daemonset to satisfy metallb podsecuritypolicy.
+ kubectl -n kube-system patch ds calico-node --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/securityContext/allowPrivilegeEscalation", "value": true}]'
+}
+
+function install_ceph {
+
+ info "Installing Ceph."
+ ceph_osd_name=$(kubectl get nodes -l role=ceph-osd-node -o=jsonpath='{.items[*].metadata.name}')
+ sed -i "s/Values.CHANGE_ME_node1_name/${ceph_osd_name}/" ${CEPH_CLUSTER_FILE}
+ yq w -i ${CEPH_CLUSTER_FILE} "spec.network.clusterNet" ${STORAGE_CLUSTER_CIDR}
+ yq w -i ${CEPH_CLUSTER_FILE} "spec.network.publicNet" ${STORAGE_PUBLIC_CIDR}
+ yq w -i ${CEPH_CLUSTER_FILE} "spec.nodes[0].devices[0].name" $(cat /usr/share/metadata/ceph.yaml | egrep '\- name\: ' | awk '{print $3}')
+ kubectl apply -f ${CEPH_CLUSTER_FILE}
+
+ function get_ceph_secret_retry {
+ kubectl -n openstack-ceph-shared get secrets openstack-ceph-keys
+ }
+ retry 30 "Get secret openstack-ceph-keys failed" get_ceph_secret_retry
+
+ # NOTE(ohryhorov): the patch below is required until ceph-operator has ability to pass toleration values for rook-ceph
+ kubectl patch cephcluster cephcluster -n rook-ceph --patch '{"spec": {"placement": {"all": {"tolerations": [{"effect": "NoSchedule", "key": "com.docker.ucp.manager", "operator": "Exists"}]}}}}' --type=merge
+}
+
+function install_openstack {
+ gateway_ip=${PUBLIC_INTERFACE_IP:-$(ip addr show dev br-public | grep -Po 'inet \K[\d.]+' | egrep -v "127.0.|172.17")}
+
+ sed -i "s/cluster.local/${LOCAL_DOMAIN}/g" ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml
+
+ yq w -i ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.features.neutron.dns_servers[+]" ${EXTERNAL_DNS_IP}
+ yq w -i -- ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.features.ssl.public_endpoints.ca_cert" "$(cat ${CERT_REPO_DIR}/ca.crt)"
+ yq w -i -- ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.features.ssl.public_endpoints.api_cert" "$(cat ${CERT_REPO_DIR}/api.pem)"
+ yq w -i -- ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.features.ssl.public_endpoints.api_key" "$(cat ${CERT_REPO_DIR}/api-key.pem)"
+ yq w -i ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.features.neutron.floating_network.subnet.gateway" ${gateway_ip}
+ yq w -i ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.features.neutron.tunnel_interface" ${TUNNEL_INTERFACE}
+ yq w -i ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.features.nova.live_migration_interface" ${LIVE_MIGRATION_INTERFACE}
+ yq w -i ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.features.services[+]" "object-storage"
+    # NOTE(ohryhorov): virt_type is set to qemu because KVM is not available on typical AWS flavors
+ yq w -i ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.services.compute.nova.values.conf.nova.libvirt.virt_type" "qemu"
+
+ kubectl apply -f ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml
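+    # Crude fixed wait for the OpenStack deployment to converge; polling the osdpl status would be more precise.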
+ sleep 1800
+
+    kubectl -n openstack create job --from=cronjob/nova-cell-setup nova-cell-setup-pd01-$(tr -dc 'a-z' < /dev/urandom | head -c3)
+    info "OpenStack was deployed successfully."
+}
+
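+# Wire up DNS both ways: cluster coredns forwards the it.just.works zone to the public coredns, and the public coredns is re-pointed at the OpenStack ingress IP.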
+function configure_public_resolve {
+ local tmpd
+ tmpd=$(mktemp -d)
+
+ openstack_ingress_ip=$(kubectl get services ingress -n openstack -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+    if [ -z "${openstack_ingress_ip}" ]; then
+ openstack_ingress_ip=$(kubectl get services ingress -n openstack -o jsonpath='{.spec.clusterIP}')
+ fi
+
+ kubectl -n kube-system get configmap coredns -o yaml > ${tmpd}/coredns-config.yaml
+ yq w -i ${tmpd}/coredns-config.yaml "data.Corefile" "$(kubectl -n kube-system get configmap coredns -ojsonpath='{.data.Corefile}')
+it.just.works:53 {
+ errors
+ cache 30
+ forward . ${EXTERNAL_DNS_IP}
+}"
+ kubectl -n kube-system apply -f ${tmpd}/coredns-config.yaml
+ kubectl -n kube-system delete pod -l k8s-app=kube-dns
+    sed -i "s/1.2.3.4/${openstack_ingress_ip}/g" ${RELEASE_OPENSTACK_K8S}/release/ci/30-coredns.yaml
+ kubectl apply -f ${RELEASE_OPENSTACK_K8S}/release/ci/30-coredns.yaml
+}
+
+function deploy_finished {
+    touch ${_DEPLOY_FINISHED_LOCK}
+}
+
+
+
+case "$NODE_TYPE" in
+    # Please keep the "prepare_metadata_files", "disable_rp_filter", "network_config" and "prepare_network"
+    # functions at the very beginning, in the same order.
+ trymos)
+ disable_rp_filter
+ configure_virt_public_iface
+ network_config
+ prepare_network
+ prepare_docker_config
+ install_required_packages
+ configure_ntp
+ configure_atop
+ workaround_default_forward_policy
+ install_docker
+ swarm_init
+ create_ucp_config
+ cache_images
+ install_ucp
+ download_bundles
+ rm_ucp_config
+ install_kubectl
+ wait_for_node
+ set_node_labels
+ collect_ceph_metadata
+ configure_contrack
+ disable_iptables_for_bridges
+ nested_virt_config
+ disable_master_taint
+ install_release_controllers
+ prepare_dns_and_ssl
+ install_3rd_party
+ install_ceph
+ install_openstack
+ configure_public_resolve
+ deploy_finished
+ ;;
+ *)
+ echo "Usage: $0 {trymos}"
+ exit 1
+esac
+
diff --git a/trymos/image_build/files/usr/share/metadata/ceph.yaml b/trymos/image_build/files/usr/share/metadata/ceph.yaml
new file mode 100644
index 0000000..24f92f2
--- /dev/null
+++ b/trymos/image_build/files/usr/share/metadata/ceph.yaml
@@ -0,0 +1,6 @@
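+# NOTE: on AWS the data disk appears as xvdb; trymos/README (step 8) rewrites vdb accordingly.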
+storageDevices:
+ - name: vdb
+ role: hdd
+ sizeGb: 2
+ramGb: 8
+cores: 2
diff --git a/trymos/image_build/template.json b/trymos/image_build/template.json
new file mode 100644
index 0000000..9af97a3
--- /dev/null
+++ b/trymos/image_build/template.json
@@ -0,0 +1,89 @@
+{
+ "variables": {
+ "user": "root",
+ "password": "r00tme",
+ "disk_size": "2500M",
+ "images_cache": "{{ env `PACKER_IMAGES_CACHE` }}",
+ "image_base_url": "{{ env `IMAGE_BASE_URL` }}",
+ "image_md5_url": "{{ env `IMAGE_MD5_URL` }}",
+ "vm_name": "{{ env `VM_NAME` }}"
+ },
+ "provisioners": [
+ {
+ "type": "shell",
+ "inline": ["mkdir -p /usr/share/trymos/"]
+ },
+ {
+ "type": "file",
+ "source": "../../de/heat-templates/scripts/instance_boot.sh",
+ "destination": "/usr/share/trymos/functions"
+ },
+ {
+ "type": "file",
+ "source": "./certs",
+ "destination": "/srv/"
+ },
+ {
+ "type": "file",
+ "source": "./release-openstack-k8s",
+ "destination": "/srv/"
+ },
+ {
+ "type": "file",
+ "source": "files/srv/",
+ "destination": "/srv"
+ },
+ {
+ "type": "file",
+ "source": "files/usr/share",
+ "destination": "/usr"
+ },
+ {
+ "type": "file",
+ "source": "files/etc/",
+ "destination": "/etc"
+ }
+ ],
+ "builders": [
+ {
+ "type": "qemu",
+ "qemuargs": [
+ [
+ "-m",
+ "8096M"
+ ],
+ [
+ "-smp",
+ "4"
+ ],
+ [
+ "-cdrom",
+ "config-drive/cloudata.iso"
+ ],
+ ["-device", "virtio-net,netdev=user.0"],
+ ["-object","rng-random,id=objrng0,filename=/dev/urandom"],
+ ["-device", "virtio-rng-pci,rng=objrng0,id=rng0,bus=pci.0,addr=0x10" ]
+ ],
+ "vm_name": "{{user `vm_name`}}",
+ "output_directory": "images",
+ "disk_compression": true,
+ "disk_size": "{{ user `disk_size`}}",
+ "format": "qcow2",
+ "iso_url": "{{ user `image_base_url`}}",
+ "iso_checksum": "file:{{ user `image_md5_url`}}",
+ "iso_target_path": "{{ user `images_cache`}}",
+ "disk_image": true,
+ "accelerator": "kvm",
+ "headless": true,
+ "ssh_username": "{{user `user`}}",
+ "ssh_password": "{{user `password`}}",
+ "ssh_host_port_min": 7000,
+ "ssh_host_port_max": 7050,
+ "vnc_port_max": "5956",
+ "vnc_port_min": "5956",
+ "shutdown_command": "shutdown -P now",
+ "boot_wait": "10s",
+ "ssh_wait_timeout": "360s"
+ }
+ ]
+}