Merge "Retry pulling and tagging image"
diff --git a/de/heat-templates/scripts/functions.sh b/de/heat-templates/scripts/functions.sh
index 5ffa909..b8b6969 100644
--- a/de/heat-templates/scripts/functions.sh
+++ b/de/heat-templates/scripts/functions.sh
@@ -791,7 +791,7 @@
 
 # Remove Tunnel interface from netplan
 if [[ $TUNNEL_INTERFACE_NETPLAN_MANAGE == false ]]; then
-    sed -i "/    ${TUNNEL_INTERFACE}/,/      set-name: ${TUNNEL_INTERFACE}/d" ${cloud_netplan_cfg}
+    yq d -i "${cloud_netplan_cfg}" "network.ethernets.${TUNNEL_INTERFACE}"
 fi
 
 if [[ -n ${IRONIC_BAREMETAL_INTERFACE} ]]; then
@@ -932,7 +932,11 @@
 
     if [[ "${apply_grub}" == true ]]; then
         update-grub
-        cloud-init clean -r
+        cloud-init clean --reboot
+        # waiting for reboot to prevent sending wait condition status failure
+        sleep 600
+        echo "Failed to reboot node" >&2
+        /bin/false
     else
         echo "GRUB already configured with huge pages"
     fi
diff --git a/trymos/image_build/build_image.sh b/trymos/image_build/build_image.sh
index e667636..a66aad1 100755
--- a/trymos/image_build/build_image.sh
+++ b/trymos/image_build/build_image.sh
@@ -1,12 +1,18 @@
 #!/bin/bash
+set -ex
 
-export IMAGE_BASE_URL="http://images.mcp.mirantis.net/bionic-server-cloudimg-amd64-20190612.img"
-export IMAGE_MD5_URL="http://images.mcp.mirantis.net/bionic-server-cloudimg-amd64-20190612.img.md5"
-export VM_NAME="trymos-bionic-amd64.qcow2"
+export ENVIRONMENT=${ENVIRONMENT:-'mosk-25.1'}
 
-export PACKER_LOG=1
-export PACKER_IMAGES_CACHE="./packer_images_cache"
+source "env/${ENVIRONMENT}/packer.env"
+
+export VM_NAME=${VM_NAME:-"trymos-amd64-${ENVIRONMENT}-$(date +"%Y%m%d%H%M%S").qcow2"}
+export PACKER_LOG=${PACKER_LOG:-"1"}
+export PACKER_IMAGES_CACHE=${PACKER_IMAGES_CACHE:-"./packer_images_cache"}
+
 mkdir -p "${PACKER_IMAGES_CACHE}"
 
-./packer build -on-error=ask template.json
+PACKER_BINARY=${PACKER_BINARY:-"$(command -v packer)"}
 
+./config_drive.sh
+
+"$PACKER_BINARY" build -on-error=cleanup template.json
diff --git a/trymos/image_build/config-drive/user-data b/trymos/image_build/config-drive/user-data
index 76cd1d4..28d97fd 100644
--- a/trymos/image_build/config-drive/user-data
+++ b/trymos/image_build/config-drive/user-data
@@ -5,7 +5,6 @@
 chpasswd:
   list: |
     root:r00tme
-#    cloud-user:r00tme
   expire: False
 output: {all: '| tee -a /var/log/cloud-init-output.log'}
 runcmd:
diff --git a/trymos/image_build/env/master/packer.env b/trymos/image_build/env/master/packer.env
new file mode 100644
index 0000000..b63300e
--- /dev/null
+++ b/trymos/image_build/env/master/packer.env
@@ -0,0 +1,3 @@
+export IMAGE_BASE_URL=${IMAGE_BASE_URL:-"https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"}
+export IMAGE_MD5_URL=${IMAGE_MD5_URL:-"https://cloud-images.ubuntu.com/jammy/current/MD5SUMS"}
+export INVENTORY_FILE=${INVENTORY_FILE:-"./env/master/single_node_inventory.yaml"}
diff --git a/trymos/image_build/env/master/single_node_inventory.yaml b/trymos/image_build/env/master/single_node_inventory.yaml
new file mode 100644
index 0000000..7d50aea
--- /dev/null
+++ b/trymos/image_build/env/master/single_node_inventory.yaml
@@ -0,0 +1,33 @@
+---
+all:
+  vars:
+    ansible_ssh_user: ubuntu
+    k0s_worker_on_controller: true
+    k0s_no_taints: true
+    k0s_version: 1.31.8+k0s.0
+    ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+    rvl_images_build_rockoon: false
+    rvl_images_build_loci: false
+    rvl_images_build_tag: 1.1.4-dev72
+    rvl_images_rockoon: docker-dev-kaas-virtual.docker.mirantis.net/openstack/rockoon
+    rvl_chart_ref_rockoon: https://artifactory.mcp.mirantis.net/artifactory/binary-dev-kaas-local/openstack/helm/rockoon/rockoon-1.1.4-dev72.tgz
+    rvl_dns_nameservers:
+    - 172.18.176.6
+
+  hosts:
+    oc-virtual-lab-server-ctl-01:
+      ansible_host: localhost
+      ansible_connection: local
+
+  children:
+    computes:
+      hosts:
+        oc-virtual-lab-server-ctl-01:
+    controllers:
+      hosts:
+        oc-virtual-lab-server-ctl-01:
+    k8s_controllers:
+      hosts:
+        oc-virtual-lab-server-ctl-01:
+    k8s_workers:
+      hosts:
diff --git a/trymos/image_build/env/mosk-25.1/packer.env b/trymos/image_build/env/mosk-25.1/packer.env
new file mode 100644
index 0000000..96348ec
--- /dev/null
+++ b/trymos/image_build/env/mosk-25.1/packer.env
@@ -0,0 +1,3 @@
+export IMAGE_BASE_URL=${IMAGE_BASE_URL:-"https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"}
+export IMAGE_MD5_URL=${IMAGE_MD5_URL:-"https://cloud-images.ubuntu.com/jammy/current/MD5SUMS"}
+export INVENTORY_FILE=${INVENTORY_FILE:-"./env/mosk-25.1/single_node_inventory.yaml"}
diff --git a/trymos/image_build/env/mosk-25.1/single_node_inventory.yaml b/trymos/image_build/env/mosk-25.1/single_node_inventory.yaml
new file mode 100644
index 0000000..7401e3b
--- /dev/null
+++ b/trymos/image_build/env/mosk-25.1/single_node_inventory.yaml
@@ -0,0 +1,33 @@
+---
+all:
+  vars:
+    ansible_ssh_user: ubuntu
+    k0s_worker_on_controller: true
+    k0s_no_taints: true
+    k0s_version: 1.27.16+k0s.0
+    ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+    rvl_images_build_rockoon: false
+    rvl_images_build_loci: false
+    rvl_images_build_tag: 1.0.7
+    rvl_images_rockoon: mirantis.azurecr.io/openstack/rockoon
+    rvl_chart_ref_rockoon: https://binary.mirantis.com/openstack/helm/rockoon/rockoon-1.0.7.tgz
+    rvl_images_public_base_url: mirantis.azurecr.io
+    rvl_binary_public_base_url: https://binary.mirantis.com
+
+  hosts:
+    oc-virtual-lab-server-ctl-01:
+      ansible_host: localhost
+      ansible_connection: local
+
+  children:
+    computes:
+      hosts:
+        oc-virtual-lab-server-ctl-01:
+    controllers:
+      hosts:
+        oc-virtual-lab-server-ctl-01:
+    k8s_controllers:
+      hosts:
+        oc-virtual-lab-server-ctl-01:
+    k8s_workers:
+      hosts:
diff --git a/trymos/image_build/files/etc/cloud/cloud.cfg b/trymos/image_build/files/etc/cloud/cloud.cfg
deleted file mode 100644
index 19f04cd..0000000
--- a/trymos/image_build/files/etc/cloud/cloud.cfg
+++ /dev/null
@@ -1,75 +0,0 @@
-# If this is set, 'root' will not be able to ssh in and they
-# will get a message to login instead as the above $user (ubuntu)
-disable_root: false
-
-# This will cause the set+update hostname module to not operate (if true)
-preserve_hostname: false
-apt_preserve_sources_list: true
-
-# Better let managing of /etc/hosts on salt and others
-# manage_etc_hosts: localhost
-
-# The modules that run in the 'init' stage
-cloud_init_modules:
- - migrator
- - seed_random
- - bootcmd
- - write-files
- - growpart
- - resizefs
- - set_hostname
- - update_hostname
- - update_etc_hosts
- - ca-certs
- - rsyslog
- - users-groups
- - ssh
-
-# The modules that run in the 'config' stage
-cloud_config_modules:
-# Emit the cloud config ready event
-# this can be used by upstart jobs for 'start on cloud-config'.
- - emit_upstart
- - disk_setup
- - mounts
- - ssh-import-id
- - locale
- - set-passwords
- - grub-dpkg
- - apt-pipelining
- - apt-configure
- - landscape
- - timezone
- - puppet
- - chef
- - salt-minion
- - mcollective
- - disable-ec2-metadata
- - runcmd
- - byobu
-
-# The modules that run in the 'final' stage
-cloud_final_modules:
- - rightscale_userdata
- - scripts-vendor
- - scripts-per-once
- - scripts-per-boot
- - scripts-per-instance
- - scripts-user
- - ssh-authkey-fingerprints
- - keys-to-console
- - phone-home
- - final-message
- - power-state-change
-
-datasource_list: [ NoCloud, ConfigDrive, Ec2, OpenStack, OVF, MAAS, None ]
-datasource:
-    Ec2:
-      timeout: 5 # (defaults to 50 seconds)
-      max_wait: 10 # (defaults to 120 seconds)
-    OpenStack:
-      timeout: 5 # (defaults to 50 seconds)
-      max_wait: 10 # (defaults to 120 seconds)
-    MAAS:
-      timeout: 5 # (defaults to 50 seconds)
-      max_wait: 10 # (defaults to 120 seconds)
diff --git a/trymos/image_build/files/etc/cloud/cloud.cfg.d/99_tcp.cfg b/trymos/image_build/files/etc/cloud/cloud.cfg.d/99_tcp.cfg
deleted file mode 100644
index 9bb8b42..0000000
--- a/trymos/image_build/files/etc/cloud/cloud.cfg.d/99_tcp.cfg
+++ /dev/null
@@ -1,29 +0,0 @@
-datasource_list: [ NoCloud, ConfigDrive, Ec2, OpenStack, OVF, MAAS, None ]
-datasource:
-    Ec2:
-      timeout: 5 # (defaults to 50 seconds)
-      max_wait: 10 # (defaults to 120 seconds)
-      strict_id: false
-    OpenStack:
-      timeout: 5 # (defaults to 50 seconds)
-      max_wait: 10 # (defaults to 120 seconds)
-    MAAS:
-      timeout: 5 # (defaults to 50 seconds)
-      max_wait: 10 # (defaults to 120 seconds)
-
-warnings:
-   dsid_missing_source: off
-
-system_info:
-   # This will affect which distro class gets used
-   distro: ubuntu
-   # Other config here will be given to the distro class and/or path classes
-   paths:
-      cloud_dir: /var/lib/cloud/
-      templates_dir: /etc/cloud/templates/
-      upstart_dir: /etc/init/
-   ssh_svcname: ssh
-
-growpart:
-   mode: auto
-   devices: ['/']
diff --git a/trymos/image_build/files/usr/share/trymos/environment/amazon b/trymos/image_build/files/usr/share/trymos/environment/amazon
deleted file mode 100644
index 9daaf17..0000000
--- a/trymos/image_build/files/usr/share/trymos/environment/amazon
+++ /dev/null
@@ -1,4 +0,0 @@
-export NTP_SERVERS=169.254.169.123
-export CEPH_STORAGE_OSD_DEVICE="xvdb"
-export CEPH_CLUSTER_FILE="${RELEASE_OPENSTACK_K8S}/examples/miraceph/aio-ceph_local_drives_openstack.yaml"
-export OPENSTACK_CONTEXT_NAME="aio-core-ceph-local-non-dvr"
diff --git a/trymos/image_build/files/usr/share/trymos/environment/openstack b/trymos/image_build/files/usr/share/trymos/environment/openstack
deleted file mode 100644
index 36680f8..0000000
--- a/trymos/image_build/files/usr/share/trymos/environment/openstack
+++ /dev/null
@@ -1,3 +0,0 @@
-export CEPH_STORAGE_OSD_DEVICE="vdb"
-export CEPH_CLUSTER_FILE="${RELEASE_OPENSTACK_K8S}/examples/miraceph/aio-ceph_local_drives_openstack.yaml"
-export OPENSTACK_CONTEXT_NAME="aio-core-ceph-local-non-dvr"
diff --git a/trymos/image_build/files/usr/share/trymos/environment/virtualbox b/trymos/image_build/files/usr/share/trymos/environment/virtualbox
deleted file mode 100644
index 4a40381..0000000
--- a/trymos/image_build/files/usr/share/trymos/environment/virtualbox
+++ /dev/null
@@ -1,4 +0,0 @@
-export NTP_SERVERS=169.254.169.123
-export CEPH_STORAGE_OSD_DEVICE="sdb"
-export CEPH_CLUSTER_FILE="${RELEASE_OPENSTACK_K8S}/examples/miraceph/aio-ceph_local_drives_openstack.yaml"
-export OPENSTACK_CONTEXT_NAME="aio-core-ceph-local-non-dvr"
diff --git a/trymos/image_build/files/usr/share/trymos/launch.sh b/trymos/image_build/files/usr/share/trymos/launch.sh
deleted file mode 100755
index ff6002f..0000000
--- a/trymos/image_build/files/usr/share/trymos/launch.sh
+++ /dev/null
@@ -1,478 +0,0 @@
-#!/bin/bash
-# Redirect all outputs
-exec > >(tee -i /tmp/bootstart-trymos-output.log) 2>&1
-
-set -x
-# allow access to the local variables from prepare-metadata.py
-set -a
-
-export NODE_TYPE='trymos'
-export NODE_METADATA='{"labels": {"local-volume-provisioner": "enabled", "openstack-compute-node": "enabled", "openstack-control-plane": "enabled", "openstack-gateway": "enabled", "openvswitch": "enabled", "role": "ceph-osd-node"}}'
-
-_ENVIRONMENT_FOLDER="/usr/share/trymos/environment"
-_DEPLOY_FINISHED_LOCK="/var/tmp/trymos-deploy-finished.lock"
-_TRYMOS_INFO_FILE="/etc/trymos-info"
-
-
-export RELEASE_OPENSTACK_K8S=/srv/release-openstack-k8s
-export CERT_REPO_DIR=/srv/certs
-export CSR_CONF="${CERT_REPO_DIR}/api-csr.json"
-export SSL_BARE_NAME='api'
-
-function get_instance_type {
-    if grep "OpenStack" -i /sys/devices/virtual/dmi/id/product_name  -q; then
-        echo "openstack"
-    elif grep -iq 'amazon' /sys/devices/virtual/dmi/id/{product_version,sys_vendor}; then
-        echo "amazon"
-    elif grep "virtualbox" -i /sys/devices/virtual/dmi/id/product_name  -q; then
-        echo "virtualbox"
-    elif grep "QEMU" -i /sys/devices/virtual/dmi/id/sys_vendor  -q; then
-        echo "openstack"
-    else
-        echo "UNKNOWN"
-    fi
-}
-export _INSTANCE_TYPE=$(get_instance_type)
-
-if [[ -f "$_ENVIRONMENT_FOLDER/${_INSTANCE_TYPE}" ]]; then
-    source $_ENVIRONMENT_FOLDER/${_INSTANCE_TYPE}
-else
-    echo "Unknown instance type $_INSTANCE_TYPE"
-    exit 1
-fi
-
-source $_ENVIRONMENT_FOLDER/common
-source /usr/share/trymos/functions
-
-export STORAGE_CLUSTER_CIDR=${CONTROL_NETWORK_CIDR}
-export STORAGE_PUBLIC_CIDR=${CONTROL_NETWORK_CIDR}
-export TUNNEL_INTERFACE=${TUNNEL_INTERFACE:-${DEFAULT_INTERFACE}}
-export LIVE_MIGRATION_INTERFACE=${LIVE_MIGRATION_INTERFACE:-${DEFAULT_INTERFACE}}
-
-if [[ -f ${_DEPLOY_FINISHED_LOCK} ]]; then
-    echo "TryMOS deploy has been performed on this node."
-    echo "If you want to redeploy please delete lock file"
-    echo "${_DEPLOY_FINISHED_LOCK}"
-    exit 1
-fi
-
-function info {
-    local msg="[INFO]: $1"
-    echo "$msg"
-}
-
-function generate_ceph_metadata {
-    mkdir -p /usr/share/metadata
-    cat << EOF > /usr/share/metadata/ceph.yaml
-storageDevices:
-  - name: ${CEPH_STORAGE_OSD_DEVICE}
-    role: hdd
-    sizeGb: 2
-ramGb: 8
-cores: 2
-EOF
-
-}
-
-
-function configure_virt_public_iface {
-
-cat << EOF > /etc/systemd/network/20-public-int.netdev
-[NetDev]
-Name=ens4
-Kind=dummy
-
-[Match]
-Name=ens4
-EOF
-
-cat << EOF > /etc/systemd/network/20-public-int.network
-[Match]
-Name=ens4
-
-[Network]
-Address=10.11.12.23/24
-Broadcast=10.11.12.255
-EOF
-
-    systemctl restart systemd-networkd
-    sleep 15
-
-}
-
-function network_config {
-    PUBLIC_NODE_IP_ADDRESS=${PUBLIC_INTERFACE_IP:-$(ip addr show dev ${PUBLIC_INTERFACE} | grep -Po 'inet \K[\d.]+' | egrep -v "127.0.|172.17")}
-    PUBLIC_NODE_IP_NETMASK=${PUBLIC_INTERFACE_NETMASK:-$(ip addr show dev ${PUBLIC_INTERFACE} | grep -Po 'inet \K[\d.]+\/[\d]+' | egrep -v "127.0.|172.17" | cut -d'/' -f2)}
-
-    local public_interface=${1:-${PUBLIC_INTERFACE}}
-    local cloud_netplan_cfg="/etc/netplan/50-cloud-init.yaml"
-    local match_ip_line
-
-    function install_bridgeutils_retry {
-        DEBIAN_FRONTEND=noninteractive apt -y install bridge-utils
-    }
-    retry 20 "Failed to install bridge-utils" install_bridgeutils_retry
-
-cat << EOF > /etc/systemd/network/10-veth-phy-br.netdev
-[NetDev]
-Name=veth-phy
-Kind=veth
-[Peer]
-Name=veth-br
-EOF
-
-    sed -i 's/.*ethernets:.*/&\n        veth-phy: {}/' ${cloud_netplan_cfg}
-    sed -i 's/.*ethernets:.*/&\n        veth-br: {}/' ${cloud_netplan_cfg}
-    # NOTE(ohryhorov): have to be disabled if PUBLIC_INTERFACE is defined by
-    # cloud-init.
-    sed -i "s/.*ethernets:.*/&\n        ${PUBLIC_INTERFACE}: {}/" ${cloud_netplan_cfg}
-    sed -i "s/${DEFAULT_INTERFACE}:/&\n            critical: true/" ${cloud_netplan_cfg}
-
-    public_address_match_ip_line=$(grep -nm1 "${PUBLIC_NODE_IP_ADDRESS}/${PUBLIC_NODE_IP_NETMASK}" ${cloud_netplan_cfg} | cut -d: -f1)
-    if [ -n "${public_address_match_ip_line}" ] ; then
-        sed -i "$((${public_address_match_ip_line}-1)),$((${public_address_match_ip_line}))d" ${cloud_netplan_cfg}
-    fi
-
-cat << EOF >> ${cloud_netplan_cfg}
-    bridges:
-        br-public:
-            dhcp4: false
-            interfaces:
-            - ${PUBLIC_INTERFACE}
-            - veth-br
-            addresses:
-            - ${PUBLIC_NODE_IP_ADDRESS}/${PUBLIC_NODE_IP_NETMASK}
-EOF
-
-# Remove Tunnel interface from netplan
-if [[ $TUNNEL_INTERFACE_NETPLAN_MANAGE == false ]]; then
-    sed -i "/        ${TUNNEL_INTERFACE}/,/            set-name: ${TUNNEL_INTERFACE}/d" ${cloud_netplan_cfg}
-fi
-
-    netplan --debug apply
-
-    # NOTE(vsaienko): give some time to apply changes
-    sleep 15
-
-# Remove Tunnel interface from netplan
-if [[ $TUNNEL_INTERFACE_NETPLAN_MANAGE == false ]]; then
-    ip addr flush ${TUNNEL_INTERFACE}
-    ip link set ${TUNNEL_INTERFACE} up
-fi
-
-}
-
-function collect_ceph_metadata {
-    local ceph_osd_node
-    ceph_osd_node=$(kubectl get nodes -l role=ceph-osd-node -o jsonpath={.items[?\(@.metadata.name==\"$(hostname)\"\)].metadata.name})
-
-    if [[ -f /usr/share/metadata/ceph.yaml && ${ceph_osd_node} ]]; then
-        HW_METADATA="{\"ceph\": {\"$(hostname)\": \"$(base64 -w 0 /usr/share/metadata/ceph.yaml)\"}}"
-        ceph_store_drive=$(cat /usr/share/metadata/ceph.yaml | egrep '\- name\: ' | awk '{print $3}')
-        if [[ -b /dev/${ceph_store_drive} ]]; then
-            sgdisk --zap-all /dev/${ceph_store_drive}
-        fi
-    fi
-}
-
-function wait_for_pods {
-    local namespace=$1
-    local component_filter=${2:-""}
-    local timeout=${3:-900}
-    local delay=${4:-30}
-
-
-    end=$(date +%s)
-    end=$((end + timeout))
-    while true; do
-        sleep $delay
-        if kubectl get pods --namespace=${namespace} $component_filter 2>&1 |grep -q 'No resources found'; then
-            continue
-        fi
-
-        kubectl get pods --namespace=${namespace} $component_filter -o json | jq -r \
-            '.items[].status.phase' | grep Pending > /dev/null && \
-            PENDING="True" || PENDING="False"
-
-        query='.items[]|select(.status.phase=="Running")'
-        query="$query|.status.containerStatuses[].ready"
-        kubectl get pods --namespace=${namespace} $component_filter -o json | jq -r "$query" | \
-            grep false > /dev/null && READY="False" || READY="True"
-
-        kubectl get jobs --namespace=${namespace} $component_filter -o json | jq -r \
-            '.items[] | .spec.completions == .status.succeeded' | \
-            grep false > /dev/null && JOBR="False" || JOBR="True"
-        [ $PENDING == "False" -a $READY == "True" -a $JOBR == "True" ] && \
-            break || true
-        sleep 5
-        now=$(date +%s)
-        if [ $now -gt $end ] ; then
-            echo "Containers failed to start after $timeout seconds"
-            echo
-            kubectl get pods --namespace ${namespace} $component_filter -o wide
-            echo
-            if [ $PENDING == "True" ] ; then
-                echo "Some pods are in pending state:"
-                kubectl get pods $component_filter --field-selector=status.phase=Pending -n ${namespace} -o wide
-            fi
-            [ $READY == "False" ] && echo "Some pods are not ready"
-            [ $JOBR == "False" ] && echo "Some jobs have not succeeded"
-            exit -1
-        fi
-    done
-}
-
-function wait_for_os_component {
-    local component=$1
-    echo "Waiting for: $component"
-    wait_for_pods openstack "-l application=$component"
-    echo "All resources for component $component Ready"
-}
-
-function install_release_controllers {
-    if [[ -f "${RELEASE_OPENSTACK_K8S}/release/ci/50-nodemaintenance.yaml" ]]; then
-        kubectl apply -f ${RELEASE_OPENSTACK_K8S}/release/ci/50-nodemaintenance.yaml
-    fi
-
-    for release_object in $(ls ${RELEASE_OPENSTACK_K8S}/release/*.yaml -1); do
-        info "Installing ${release_object}"
-        function apply_retry {
-            kubectl apply -f ${release_object}
-        }
-        retry 3 "Can't apply ${release_object}" apply_retry
-    done
-}
-
-function prepare_dns_and_ssl {
-
-    local tmpd
-    tmpd=$(mktemp -d)
-
-    info "Preparing DNS and SSL configuration."
-
-    kubectl apply -f ${RELEASE_OPENSTACK_K8S}/release/ci/30-coredns.yaml
-
-    wait_for_pods coredns "" 600
-
-    EXTERNAL_DNS_IP=$(kubectl -n coredns get services coredns-coredns -o jsonpath='{.status.loadBalancer.ingress[].ip}')
-    if [ -z ${EXTERNAL_DNS_IP} ]; then
-        EXTERNAL_DNS_IP=$(kubectl -n coredns get services coredns-coredns -o jsonpath='{.spec.clusterIP}')
-    fi
-
-    LOCAL_DOMAIN=$(kubectl get configmap -n kube-system coredns -o jsonpath='{.data.Corefile}' | awk '/in-addr.arpa.*{/ {print $2}')
-
-    pushd $tmpd
-    info "Generating SSL certificates."
-    yq w ${CSR_CONF} "CN" "*.openstack.svc.${LOCAL_DOMAIN}" | \
-    yq w -j - "hosts[+]" "*.openstack.svc.${LOCAL_DOMAIN}" | \
-    cfssl gencert -ca=${CERT_REPO_DIR}/ca.crt -ca-key=${CERT_REPO_DIR}/ca.key - | cfssljson -bare ${SSL_BARE_NAME}
-    mv ${tmpd}/${SSL_BARE_NAME}* ${CERT_REPO_DIR}
-    popd
-}
-
-function install_3rd_party {
-
-    info "Installing metallb."
-    kubectl apply -f ${RELEASE_OPENSTACK_K8S}/release/3rd-party/30-metallb.yaml
-    # NOTE(vsaienko): unless PRODX-7154 is resolved, update calico daemonset to satisfy metallb podsecuritypolicy.
-    kubectl -n kube-system patch ds calico-node --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/securityContext/allowPrivilegeEscalation", "value": true}]'
-    info "Waiting for MetalLB pods to be deployed"
-    wait_for_pods metallb "" 600
-    kubectl apply -f ${RELEASE_OPENSTACK_K8S}/release/3rd-party/31-metallb-address-pools.yaml
-}
-
-function install_ceph {
-
-    info "Installing Ceph."
-    ceph_osd_name=$(kubectl get nodes -l role=ceph-osd-node -o=jsonpath='{.items[*].metadata.name}')
-    sed -i "s/Values.CHANGE_ME_node1_name/${ceph_osd_name}/" ${CEPH_CLUSTER_FILE}
-    yq w -i ${CEPH_CLUSTER_FILE} "spec.network.clusterNet" ${STORAGE_CLUSTER_CIDR}
-    yq w -i ${CEPH_CLUSTER_FILE} "spec.network.publicNet" ${STORAGE_PUBLIC_CIDR}
-    yq w -i ${CEPH_CLUSTER_FILE} "spec.nodes[0].devices[0].name" $(cat /usr/share/metadata/ceph.yaml | egrep '\- name\: ' | awk '{print $3}')
-    kubectl apply -f ${CEPH_CLUSTER_FILE}
-
-    function get_ceph_secret_retry {
-        kubectl -n openstack-ceph-shared get secrets openstack-ceph-keys
-    }
-    retry 30 "Get secret openstack-ceph-keys failed" get_ceph_secret_retry
-
-# NOTE(okononenko): do scale deploy for ceph-controller here because it not relised as config yet
-# TODO(okononenko): drop this when number of replicas for ceph controller might be specified via chart values. Related Task:PRODX-18984
-    kubectl scale --replicas=1 deployments/ceph-controller -n ceph-lcm-mirantis
-}
-
-function install_openstack {
-    gateway_ip=${PUBLIC_INTERFACE_IP:-$(ip addr show dev br-public | grep -Po 'inet \K[\d.]+' | egrep -v "127.0.|172.17")}
-
-    sed -i "s/cluster.local/${LOCAL_DOMAIN}/g" ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml
-
-    yq w -i ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.features.neutron.dns_servers[+]" ${EXTERNAL_DNS_IP}
-    yq w -i -- ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.features.ssl.public_endpoints.ca_cert" "$(cat ${CERT_REPO_DIR}/ca.crt)"
-    yq w -i -- ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.features.ssl.public_endpoints.api_cert" "$(cat ${CERT_REPO_DIR}/api.pem)"
-    yq w -i -- ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.features.ssl.public_endpoints.api_key" "$(cat ${CERT_REPO_DIR}/api-key.pem)"
-    yq w -i ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.features.neutron.floating_network.subnet.gateway" ${gateway_ip}
-    yq w -i ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.features.neutron.tunnel_interface" ${TUNNEL_INTERFACE}
-    yq w -i ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.features.nova.live_migration_interface" ${LIVE_MIGRATION_INTERFACE}
-    yq w -i ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.features.services[+]" "object-storage"
-    #NOTE (ohryhorov): libvirt_type is set to qemu because kvm is not supported by average flavors in AWS
-    yq w -i ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.services.compute.nova.values.conf.nova.libvirt.virt_type" "qemu"
-
-    # NOTE(vsaienko): unless PRODX-24474 is fixed
-    yq w -i ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml "spec.services.dashboard.horizon.values.endpoints.dashboard.port.web.default" "4999"
-
-    kubectl apply -f ${RELEASE_OPENSTACK_K8S}/examples/osdpl/${OPENSTACK_CONTEXT_NAME}.yaml
-
-    wait_for_pods openstack "-l app.kubernetes.io/name=cache" 1800
-
-    for component in mariadb rabbitmq memcached openvswitch libvirt keystone glance cinder nova neutron barbican octavia cinder designate heat; do
-        wait_for_os_component $component
-    done
-    wait_for_pods openstack
-
-    kubectl -n openstack create job --from=cronjob/nova-cell-setup  nova-cell-setup-pd01-$(cat /dev/urandom | tr -dc a-z | head -c3)
-    info "Openstack was deployed successfully..."
-}
-
-function configure_public_resolve {
-    local tmpd
-    tmpd=$(mktemp -d)
-
-    openstack_ingress_ip=$(kubectl get services ingress -n openstack -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
-    if [ -z ${openstack_ingress_ip} ]; then
-        openstack_ingress_ip=$(kubectl get services ingress -n openstack -o jsonpath='{.spec.clusterIP}')
-    fi
-
-    kubectl -n kube-system get configmap coredns -o yaml > ${tmpd}/coredns-config.yaml
-    yq w -i ${tmpd}/coredns-config.yaml "data.Corefile" "$(kubectl -n kube-system get configmap coredns -ojsonpath='{.data.Corefile}')
-it.just.works:53 {
-    errors
-    cache 30
-    forward . ${EXTERNAL_DNS_IP}
-}"
-    kubectl -n kube-system apply -f ${tmpd}/coredns-config.yaml
-    kubectl -n kube-system delete pod -l k8s-app=kube-dns
-    sed -i "s/1.2.3.4/${openstack_ingress_ip}/g" -i ${RELEASE_OPENSTACK_K8S}/release/ci/30-coredns.yaml
-    kubectl apply -f ${RELEASE_OPENSTACK_K8S}/release/ci/30-coredns.yaml
-}
-
-function deploy_finished {
-    touch ${_DEPLOY_FINISHED_LOCK}
-}
-
-function write_trymos_info {
-    local distro_info
-    local os_controller_version
-    local ceph_controller_version
-    local os_version
-    local keystone_client_pod
-    local os_admin_username
-    local os_admin_password
-
-    distro_info=$(lsb_release -sd)
-    os_controller_version=$(kubectl -n osh-system get helmbundles openstack-operator -o jsonpath='{.status.releaseStatuses.openstack-operator.version}')
-    ceph_controller_version=$(kubectl -n osh-system get helmbundles ceph-operator -o jsonpath='{.status.releaseStatuses.ceph-operator.version}')
-    os_version=$(kubectl -n openstack get osdpl osh-dev -o jsonpath='{.spec.openstack_version}')
-    keystone_client_pod=$(kubectl -n openstack get pods -l application=keystone,component=client -o jsonpath='{.items[*].metadata.name}')
-    os_admin_username=$(kubectl -n openstack exec -it $keystone_client_pod -- bash -c "echo \$OS_USERNAME")
-    os_admin_password=$(kubectl -n openstack exec -it $keystone_client_pod -- bash -c "echo \$OS_PASSWORD")
-
-    cat << EOF > ${_TRYMOS_INFO_FILE}
-####################################################
-#################### TryMOS ########################
-####################################################
-
-############### Components Versions ################
-
-OS:                       ${distro_info}
-TryMOS Version:           ${TRYMOS_VERSION}
-OpenStack Controller:     ${os_controller_version}
-Ceph Controller:          ${ceph_controller_version}
-OpenStack Version:        ${os_version}
-
-############# UCP access information ###############
-UCP Admin Username:       ${UCP_USERNAME}
-UCP Admin Password:       ${UCP_PASSWORD}
-
-########### OpenStack access information ###########
-OpenStack Admin Username: ${os_admin_username}
-OpenStack Admin Password: ${os_admin_password}
-
-####################################################
-EOF
-
-    cat <<EOF > /etc/update-motd.d/99-trymos-info
-#!/bin/sh
-cat ${_TRYMOS_INFO_FILE}
-EOF
-
-    chmod +x /etc/update-motd.d/99-trymos-info
-
-    cat ${_TRYMOS_INFO_FILE}
-}
-
-function wait_dpkg_finished {
-    local timeout=${1:-600}
-    local delay=${2:-10}
-    dpkg_lock="/var/lib/dpkg/lock-frontend"
-
-    end=$(date +%s)
-    end=$((end + timeout))
-
-    while true; do
-        if lsof ${dpkg_lock} 2>&1 > /dev/null ; then
-            sleep $delay
-            now=$(date +%s)
-            if [ $now -gt $end ] ; then
-                echo "dpkg process still running"
-                exit 1
-            fi
-        else
-            break
-        fi
-    done
-}
-
-case "$NODE_TYPE" in
-    # Please keep the "prepare_metadata_files", "disable-rp-filter", "network_config" and "prepare_network" functions
-    # at the very beginning in the same order.
-    trymos)
-        install_required_packages
-        wait_dpkg_finished
-        disable_rp_filter
-        configure_virt_public_iface
-        network_config
-        prepare_network
-        prepare_docker_config
-        configure_ntp
-        configure_atop
-        workaround_default_forward_policy
-        install_docker
-        swarm_init
-        create_ucp_config
-        cache_images
-        install_ucp
-        download_bundles
-        rm_ucp_config
-        install_kubectl
-        wait_for_node
-        set_node_labels
-        generate_ceph_metadata
-        collect_ceph_metadata
-        configure_contrack
-        disable_iptables_for_bridges
-        nested_virt_config
-        disable_master_taint
-        install_release_controllers
-        prepare_dns_and_ssl
-        install_3rd_party
-        install_ceph
-        install_openstack
-        configure_public_resolve
-        write_trymos_info
-        deploy_finished
-        ;;
-    *)
-        echo "Usage: $0 {trymos}"
-        exit 1
-esac
-
diff --git a/trymos/image_build/scripts/binary.sh b/trymos/image_build/scripts/binary.sh
deleted file mode 100644
index f0d41ea..0000000
--- a/trymos/image_build/scripts/binary.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-set -ex
-
-LOCAL_BINARY_PATH='/usr/bin'
-
-declare -A binaries=(
-    ["${LOCAL_BINARY_PATH}/yq"]="${BINARY_BASE_URL}/openstack/bin/utils/yq/yq-v3.3.2"
-    ["${LOCAL_BINARY_PATH}/cfssl"]="${BINARY_BASE_URL}/openstack/bin/utils/cfssl/cfssl"
-    ["${LOCAL_BINARY_PATH}/cfssljson"]="${BINARY_BASE_URL}/openstack/bin/utils/cfssl/cfssljson"
-)
-
-function download {
-    local url=$1
-    local dst=$2
-    curl --retry 6 --retry-delay 10 -L ${url} -o ${dst}
-    chmod +x ${dst}
-}
-
-for bin in "${!binaries[@]}"; do
-    download ${binaries[${bin}]} ${bin}
-done
diff --git a/trymos/image_build/scripts/launch.sh b/trymos/image_build/scripts/launch.sh
new file mode 100755
index 0000000..5c84200
--- /dev/null
+++ b/trymos/image_build/scripts/launch.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+set -ex
+
+export INVENTORY_FILE=/srv/single_node.yaml
+
+/srv/virtual_lab/install.sh
diff --git a/trymos/image_build/template.json b/trymos/image_build/template.json
index 3c2ee67..be7d8b9 100644
--- a/trymos/image_build/template.json
+++ b/trymos/image_build/template.json
@@ -7,47 +7,34 @@
     "image_base_url": "{{ env `IMAGE_BASE_URL` }}",
     "image_md5_url": "{{ env `IMAGE_MD5_URL` }}",
     "vm_name": "{{ env `VM_NAME` }}",
-    "binary_base_url": "{{ env `BINARY_BASE_URL` }}"
+    "inventory_file": "{{ env `INVENTORY_FILE` }}"
   },
   "provisioners": [
     {
       "type": "shell",
-      "environment_vars": [
-          "BINARY_BASE_URL={{user `binary_base_url`}}"
-      ],
-      "scripts": [
-          "scripts/binary.sh"
+      "inline": [
+         "mkdir -p /usr/share/trymos /srv"
       ]
     },
     {
       "type": "file",
-      "source": "./certs",
+      "source": "{{user `inventory_file`}}",
+      "destination": "/srv/single_node.yaml"
+    },
+    {
+      "type": "file",
+      "source": "../rockoon/charts",
       "destination": "/srv/"
     },
     {
       "type": "file",
-      "source": "./release-openstack-k8s",
+      "source": "../rockoon/virtual_lab",
       "destination": "/srv/"
     },
     {
       "type": "file",
-      "source": "files/usr/share",
-      "destination": "/usr"
-    },
-    {
-      "type": "file",
-      "source": "../../de/heat-templates/scripts/instance_boot.sh",
-      "destination": "/usr/share/trymos/functions"
-    },
-    {
-      "type": "file",
-      "source": "files/etc/",
-      "destination": "/etc"
-    },
-    {
-      "type": "file",
-      "source": "./release-openstack-k8s/trymos/files/environment/common",
-      "destination": "/usr/share/trymos/environment/common"
+      "source": "./scripts/launch.sh",
+      "destination": "/usr/share/trymos/"
     }
   ],
   "builders": [
@@ -56,7 +43,7 @@
       "qemuargs": [
         [
           "-m",
-          "8096M"
+          "4096M"
         ],
         [
           "-smp",