Merge "Slightly increase RAM for cmps (8 to 10Gb)"
diff --git a/de/heat-templates/env/k0s-aio.yaml b/de/heat-templates/env/k0s-aio.yaml
index 056b0ab..eca9149 100644
--- a/de/heat-templates/env/k0s-aio.yaml
+++ b/de/heat-templates/env/k0s-aio.yaml
@@ -21,6 +21,7 @@
   ucp_boot_timeout: 3600
   cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
   private_floating_network_cidr: '10.11.12.0/24'
+  private_floating_network_gateway: '10.11.12.1'
   private_floating_interface: 'ens4'
   tunnel_interface: 'ens3'
   ucp_metadata: {"labels": {"openstack-control-plane":"enabled","openstack-compute-node":"enabled","openvswitch":"enabled","openstack-gateway":"enabled","role":"ceph-osd-node","local-volume-provisioner":"enabled"}}
@@ -33,6 +34,9 @@
   workers_flavor: 'system.compact.openstack.control.ephemeral'
   cmps_flavor: 'mosk.s.compute.ephemeral'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
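+  # Pool starts at .3 so the gateway address .1 (claimed by a router port in
+  # NetworkAccVM.yaml) stays outside IPAM allocation; .2 is presumably spare.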
   storage_backend_network_cidr: '10.12.0.0/24'
   kubernetes_installer: k0s
   single_node: 'true'
diff --git a/de/heat-templates/env/k0s-mstr1-wrkr3-cmp0-gtw0.yaml b/de/heat-templates/env/k0s-mstr1-wrkr3-cmp0-gtw0.yaml
index fce638f..0c8b0e8 100644
--- a/de/heat-templates/env/k0s-mstr1-wrkr3-cmp0-gtw0.yaml
+++ b/de/heat-templates/env/k0s-mstr1-wrkr3-cmp0-gtw0.yaml
@@ -30,6 +30,9 @@
   workers_flavor: 'system.compact.openstack.control.ephemeral'
   cmps_flavor: 'mosk.s.compute.ephemeral'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   kubernetes_installer: k0s
   hardware_metadata: |
diff --git a/de/heat-templates/env/mstr1-wrkr3-cmp0-gtw0-vbmc2.yaml b/de/heat-templates/env/mstr1-wrkr3-cmp0-gtw0-vbmc2.yaml
index 29c32c3..5db0db5 100644
--- a/de/heat-templates/env/mstr1-wrkr3-cmp0-gtw0-vbmc2.yaml
+++ b/de/heat-templates/env/mstr1-wrkr3-cmp0-gtw0-vbmc2.yaml
@@ -21,6 +21,7 @@
   ucp_boot_timeout: 3600
   cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
   private_floating_network_cidr: '10.11.12.0/24'
+  private_floating_network_gateway: '10.11.12.1'
   private_floating_interface: 'ens4'
   tunnel_interface: 'ens8'
   worker_metadata: {"labels": {"openstack-control-plane":"enabled","openvswitch":"enabled","openstack-gateway": "enabled","role":"ceph-osd-node","local-volume-provisioner": "enabled"}}
@@ -39,6 +40,9 @@
   cmps_flavor: 'mosk.s.compute.ephemeral'
   vbmcs_flavor: 'system.compact.openstack.control'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   hardware_metadata: |
     '00:00:00:00:00:00':
diff --git a/de/heat-templates/env/mstr1-wrkr3-cmp0-gtw0.yaml b/de/heat-templates/env/mstr1-wrkr3-cmp0-gtw0.yaml
index 623f130..4e491a9 100644
--- a/de/heat-templates/env/mstr1-wrkr3-cmp0-gtw0.yaml
+++ b/de/heat-templates/env/mstr1-wrkr3-cmp0-gtw0.yaml
@@ -20,6 +20,7 @@
   ucp_boot_timeout: 3600
   cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
   private_floating_network_cidr: '10.11.12.0/24'
+  private_floating_network_gateway: '10.11.12.1'
   private_floating_interface: 'ens4'
   tunnel_interface: 'ens8'
   worker_metadata: {"labels": {"openstack-control-plane":"enabled","openstack-compute-node":"enabled","openvswitch":"enabled", "openstack-gateway":"enabled","role":"ceph-osd-node","local-volume-provisioner": "enabled"}}
@@ -30,6 +31,9 @@
   workers_flavor: 'system.compact.openstack.control.ephemeral'
   cmps_flavor: 'mosk.s.compute.ephemeral'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   hardware_metadata: |
     '00:00:00:00:00:00':
diff --git a/de/heat-templates/env/mstr1-wrkr3-cmp2-acmp2-gtw0.yaml b/de/heat-templates/env/mstr1-wrkr3-cmp2-acmp2-gtw0.yaml
index 1f5ac99..f382d9e 100644
--- a/de/heat-templates/env/mstr1-wrkr3-cmp2-acmp2-gtw0.yaml
+++ b/de/heat-templates/env/mstr1-wrkr3-cmp2-acmp2-gtw0.yaml
@@ -22,6 +22,7 @@
   ucp_boot_timeout: 3600
   cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
   private_floating_network_cidr: '10.11.12.0/24'
+  private_floating_network_gateway: '10.11.12.1'
   private_floating_interface: 'ens4'
   tunnel_interface: 'ens8'
   worker_metadata: {"labels": {"openstack-control-plane":"enabled","openvswitch":"enabled","openstack-gateway":"enabled","local-volume-provisioner": "enabled"}}
@@ -34,6 +35,9 @@
   cmps_flavor: 'mosk.s.compute.ephemeral'
   acmps_flavor: 'mosk.s.compute.ephemeral.numa'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   # Enable only 1 size of huge pages because of https://mirantis.jira.com/browse/PRODX-8809
   huge_pages: '0,5000'
diff --git a/de/heat-templates/env/mstr1-wrkr3-cmp2-acmp2-ntw3.yaml b/de/heat-templates/env/mstr1-wrkr3-cmp2-acmp2-ntw3.yaml
index 01dfb94..911b071 100644
--- a/de/heat-templates/env/mstr1-wrkr3-cmp2-acmp2-ntw3.yaml
+++ b/de/heat-templates/env/mstr1-wrkr3-cmp2-acmp2-ntw3.yaml
@@ -43,6 +43,9 @@
   cmps_flavor: 'mosk.s.compute.ephemeral'
   acmps_flavor: 'mosk.s.compute.ephemeral.numa'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   # Enable only 1 size of huge pages because of https://mirantis.jira.com/browse/PRODX-8809
   huge_pages: '0,5000'
diff --git a/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-lma3.yaml b/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-lma3.yaml
index 06ca333..8bee567 100644
--- a/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-lma3.yaml
+++ b/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-lma3.yaml
@@ -21,6 +21,7 @@
   ucp_boot_timeout: 3600
   cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
   private_floating_network_cidr: '10.11.12.0/24'
+  private_floating_network_gateway: '10.11.12.1'
   private_floating_interface: 'ens4'
   tunnel_interface: 'ens8'
   worker_metadata: {"labels": {"openstack-control-plane":"enabled","openvswitch":"enabled","openstack-gateway":"enabled","local-volume-provisioner": "enabled"}}
@@ -32,6 +33,9 @@
   workers_flavor: 'mosk.l.control.ephemeral'
   cmps_flavor: 'mosk.s.compute.ephemeral'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   hardware_metadata: |
     '00:00:00:00:00:00':
diff --git a/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-vbmc5.yaml b/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-vbmc5.yaml
index 735d29a..ba93aaa 100644
--- a/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-vbmc5.yaml
+++ b/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-vbmc5.yaml
@@ -12,7 +12,7 @@
   image: bionic-server-cloudimg-amd64-20190612
   public_net_id: public
   masters_size: 0
-  worker_size: 5
+  worker_size: 3
   cmp_size: 2
   gtw_size: 0
   lma_size: 0
@@ -21,6 +21,7 @@
   ucp_boot_timeout: 3600
   cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
   private_floating_network_cidr: '10.11.12.0/24'
+  private_floating_network_gateway: '10.11.12.1'
   private_floating_interface: 'ens4'
   tunnel_interface: 'ens8'
   worker_metadata: {"labels": {"openstack-control-plane":"enabled","openvswitch":"enabled","openstack-gateway": "enabled","local-volume-provisioner": "enabled"}}
@@ -42,6 +43,9 @@
   cmps_flavor: 'mosk.s.compute.ephemeral'
   vbmcs_flavor: 'system.compact.openstack.control'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   hardware_metadata: |
     '00:00:00:00:00:00':
diff --git a/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-vsrx1.yaml b/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-vsrx1.yaml
index 48852fe..0b2917f 100644
--- a/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-vsrx1.yaml
+++ b/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0-vsrx1.yaml
@@ -37,6 +37,9 @@
   workers_flavor: 'system.compact.openstack.control.ephemeral'
   cmps_flavor: 'mosk.s.compute.ephemeral'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   # Simulate changed default port for docker overlay vxlan
   # https://mirantis.jira.com/browse/PRODX-11679
diff --git a/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0.yaml b/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0.yaml
index 612d399..f1c26f1 100644
--- a/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0.yaml
+++ b/de/heat-templates/env/mstr1-wrkr3-cmp2-gtw0.yaml
@@ -20,6 +20,7 @@
   ucp_boot_timeout: 3600
   cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
   private_floating_network_cidr: '10.11.12.0/24'
+  private_floating_network_gateway: '10.11.12.1'
   private_floating_interface: 'ens4'
   tunnel_interface: 'ens8'
   worker_metadata: {"labels": {"openstack-control-plane":"enabled","openvswitch":"enabled","openstack-gateway": "enabled","local-volume-provisioner": "enabled"}}
@@ -30,6 +31,9 @@
   workers_flavor: 'mosk.l.control.ephemeral'
   cmps_flavor: 'mosk.s.compute.ephemeral'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   cmp_lvm_loop_device_size: 50
   cmp_cinder_lvm_loop_device_size: 50
diff --git a/de/heat-templates/env/mstr1-wrkr3-cmp2-ntw3-lma3.yaml b/de/heat-templates/env/mstr1-wrkr3-cmp2-ntw3-lma3.yaml
index b917c7d..f5ae9a3 100644
--- a/de/heat-templates/env/mstr1-wrkr3-cmp2-ntw3-lma3.yaml
+++ b/de/heat-templates/env/mstr1-wrkr3-cmp2-ntw3-lma3.yaml
@@ -42,6 +42,9 @@
   workers_flavor: 'mosk.l.control.ephemeral'
   cmps_flavor: 'mosk.s.compute.ephemeral'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   hardware_metadata: |
     '00:00:00:00:00:00':
diff --git a/de/heat-templates/env/mstr1-wrkr3-cmp2-ntw3-vbmc2.yaml b/de/heat-templates/env/mstr1-wrkr3-cmp2-ntw3-vbmc2.yaml
index f036e43..01ef904 100644
--- a/de/heat-templates/env/mstr1-wrkr3-cmp2-ntw3-vbmc2.yaml
+++ b/de/heat-templates/env/mstr1-wrkr3-cmp2-ntw3-vbmc2.yaml
@@ -49,6 +49,9 @@
   cmps_flavor: 'mosk.s.compute.ephemeral'
   vbmcs_flavor: 'system.compact.openstack.control'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   hardware_metadata: |
     '00:00:00:00:00:00':
diff --git a/de/heat-templates/env/mstr1-wrkr3-cmp2-ntw3-vmx.yaml b/de/heat-templates/env/mstr1-wrkr3-cmp2-ntw3-vmx.yaml
index b160edc..fafe70e 100644
--- a/de/heat-templates/env/mstr1-wrkr3-cmp2-ntw3-vmx.yaml
+++ b/de/heat-templates/env/mstr1-wrkr3-cmp2-ntw3-vmx.yaml
@@ -54,6 +54,9 @@
   workers_flavor: 'system.compact.openstack.control.ephemeral'
   cmps_flavor: 'mosk.s.compute.ephemeral'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   hardware_metadata: |
     '00:00:00:00:00:00':
diff --git a/de/heat-templates/env/mstr1-wrkr3-cmp2-ntw3.yaml b/de/heat-templates/env/mstr1-wrkr3-cmp2-ntw3.yaml
index df8738d..c42a8d5 100644
--- a/de/heat-templates/env/mstr1-wrkr3-cmp2-ntw3.yaml
+++ b/de/heat-templates/env/mstr1-wrkr3-cmp2-ntw3.yaml
@@ -40,6 +40,9 @@
   workers_flavor: 'system.compact.openstack.control.ephemeral'
   cmps_flavor: 'mosk.s.compute.ephemeral.numa'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   hardware_metadata: |
     '00:00:00:00:00:00':
diff --git a/de/heat-templates/env/mstr1-wrkr3-cmp3-gtw0.yaml b/de/heat-templates/env/mstr1-wrkr3-cmp3-gtw0.yaml
index ffb67ac..7202563 100644
--- a/de/heat-templates/env/mstr1-wrkr3-cmp3-gtw0.yaml
+++ b/de/heat-templates/env/mstr1-wrkr3-cmp3-gtw0.yaml
@@ -20,6 +20,7 @@
   ucp_boot_timeout: 3600
   cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
   private_floating_network_cidr: '10.11.12.0/24'
+  private_floating_network_gateway: '10.11.12.1'
   private_floating_interface: 'ens4'
   tunnel_interface: 'ens8'
   worker_metadata: {"labels": {"openstack-control-plane":"enabled","openvswitch":"enabled","openstack-gateway":"enabled","local-volume-provisioner": "enabled"}}
@@ -30,6 +31,9 @@
   workers_flavor: 'mosk.l.control.ephemeral'
   cmps_flavor: 'mosk.s.compute.ephemeral'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   cmp_lvm_loop_device_size: 50
   cmp_cinder_lvm_loop_device_size: 50
diff --git a/de/heat-templates/env/mstr1-wrkr3-cmp3-ntw3.yaml b/de/heat-templates/env/mstr1-wrkr3-cmp3-ntw3.yaml
index 671bd4e..4978f77 100644
--- a/de/heat-templates/env/mstr1-wrkr3-cmp3-ntw3.yaml
+++ b/de/heat-templates/env/mstr1-wrkr3-cmp3-ntw3.yaml
@@ -40,6 +40,9 @@
   workers_flavor: 'system.compact.openstack.control.ephemeral'
   cmps_flavor: 'mosk.s.compute.ephemeral'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   hardware_metadata: |
     '00:00:00:00:00:00':
diff --git a/de/heat-templates/env/mstr1-wrkr3-cmp5-gtw0.yaml b/de/heat-templates/env/mstr1-wrkr3-cmp5-gtw0.yaml
index 51e676e..450f5a8 100644
--- a/de/heat-templates/env/mstr1-wrkr3-cmp5-gtw0.yaml
+++ b/de/heat-templates/env/mstr1-wrkr3-cmp5-gtw0.yaml
@@ -20,6 +20,7 @@
   ucp_boot_timeout: 3600
   cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
   private_floating_network_cidr: '10.11.12.0/24'
+  private_floating_network_gateway: '10.11.12.1'
   private_floating_interface: 'ens4'
   tunnel_interface: 'ens8'
   worker_metadata: {"labels": {"openstack-control-plane":"enabled","openvswitch":"enabled","openstack-gateway":"enabled","local-volume-provisioner": "enabled"}}
@@ -30,6 +31,9 @@
   workers_flavor: 'mosk.l.control.ephemeral'
   cmps_flavor: 'mosk.s.compute.ephemeral'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   cmp_lvm_loop_device_size: 50
   cmp_cinder_lvm_loop_device_size: 50
diff --git a/de/heat-templates/env/mstr1-wrkr5-cmp2-gtw0-vsrx1.yaml b/de/heat-templates/env/mstr1-wrkr5-cmp2-gtw0-vsrx1.yaml
index e8f89c2..dda7de4 100644
--- a/de/heat-templates/env/mstr1-wrkr5-cmp2-gtw0-vsrx1.yaml
+++ b/de/heat-templates/env/mstr1-wrkr5-cmp2-gtw0-vsrx1.yaml
@@ -37,6 +37,9 @@
   workers_flavor: 'system.compact.openstack.control.ephemeral'
   cmps_flavor: 'mosk.s.compute.ephemeral'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   # Simulate changed default port for docker overlay vxlan
   # https://mirantis.jira.com/browse/PRODX-11679
diff --git a/de/heat-templates/env/mstr1-wrkr5-cmp2-ntw3.yaml b/de/heat-templates/env/mstr1-wrkr5-cmp2-ntw3.yaml
index 73c9439..caeaa0e 100644
--- a/de/heat-templates/env/mstr1-wrkr5-cmp2-ntw3.yaml
+++ b/de/heat-templates/env/mstr1-wrkr5-cmp2-ntw3.yaml
@@ -40,6 +40,9 @@
   workers_flavor: 'system.compact.openstack.control.ephemeral'
   cmps_flavor: 'mosk.s.compute.ephemeral.numa'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   hardware_metadata: |
     '00:00:00:00:00:00':
diff --git a/de/heat-templates/env/mstr1-wrkr5-cmp3-cmpgw2-ntw3.yaml b/de/heat-templates/env/mstr1-wrkr5-cmp3-cmpgw2-ntw3.yaml
index a763cae..948c627 100644
--- a/de/heat-templates/env/mstr1-wrkr5-cmp3-cmpgw2-ntw3.yaml
+++ b/de/heat-templates/env/mstr1-wrkr5-cmp3-cmpgw2-ntw3.yaml
@@ -42,6 +42,9 @@
   workers_flavor: 'system.compact.openstack.control.ephemeral'
   cmps_flavor: 'mosk.s.compute.ephemeral'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   hardware_metadata: |
     '00:00:00:00:00:00':
diff --git a/de/heat-templates/env/mstr1-wrkr5-cmp3-gtw0.yaml b/de/heat-templates/env/mstr1-wrkr5-cmp3-gtw0.yaml
index 58f9717..8387f60 100644
--- a/de/heat-templates/env/mstr1-wrkr5-cmp3-gtw0.yaml
+++ b/de/heat-templates/env/mstr1-wrkr5-cmp3-gtw0.yaml
@@ -20,6 +20,7 @@
   ucp_boot_timeout: 3600
   cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
   private_floating_network_cidr: '10.11.12.0/24'
+  private_floating_network_gateway: '10.11.12.1'
   private_floating_interface: 'ens4'
   tunnel_interface: 'ens8'
   worker_metadata: {"labels": {"openstack-control-plane":"enabled","openvswitch":"enabled","openstack-gateway":"enabled","local-volume-provisioner": "enabled"}}
@@ -28,6 +29,9 @@
   workers_flavor: 'mosk.s.control.ephemeral'
   cmps_flavor: 'mosk.s.compute.ephemeral'
   storage_frontend_network_cidr: '10.12.1.0/24'
+  storage_frontend_network_gateway: '10.12.1.1'
+  storage_frontend_network_ipam_pool_start: '10.12.1.3'
+  storage_frontend_network_ipam_pool_end: '10.12.1.254'
   storage_backend_network_cidr: '10.12.0.0/24'
   cmp_lvm_loop_device_size: 50
   cmp_cinder_lvm_loop_device_size: 50
diff --git a/de/heat-templates/fragments/NetworkAccVM.yaml b/de/heat-templates/fragments/NetworkAccVM.yaml
index 2e1b3f2..cee4951 100644
--- a/de/heat-templates/fragments/NetworkAccVM.yaml
+++ b/de/heat-templates/fragments/NetworkAccVM.yaml
@@ -21,6 +21,24 @@
   private_floating_network_cidr:
     type: string
     default: ''
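+  # Network/subnet IDs and gateway IPs passed in from top.yaml; the router
+  # below claims the gateway address on each of these networks.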
+  private_floating_network:
+    type: string
+    default: ''
+  private_floating_subnet:
+    type: string
+    default: ''
+  private_floating_network_gateway:
+    type: string
+    default: ''
+  storage_frontend_network:
+    type: string
+    default: ''
+  storage_frontend_subnet:
+    type: string
+    default: ''
+  storage_frontend_network_gateway:
+    type: string
+    default: ''
 
 conditions:
   tf:
@@ -48,6 +66,26 @@
       cidr: { get_param: control_network_cidr }
       dns_nameservers: { get_param: dns_nameservers }
       host_routes: { get_param: control_network_host_routes }
+
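+  # The subnets are created with gateway_ip unset (see NetworkAccVMStorage.yaml),
+  # so the router attaches via explicit ports that claim the gateway addresses;
+  # the floating port is skipped when the extra-routes condition is active.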
+  private_floating_router_port:
+    type: OS::Neutron::Port
+    condition: {not: cond_extra_routes}
+    properties:
+      network_id: { get_param: private_floating_network }
+      port_security_enabled: false
+      fixed_ips:
+        - subnet: { get_param: private_floating_subnet }
+          ip_address: { get_param: private_floating_network_gateway }
+
+  public_storage_router_port:
+    type: OS::Neutron::Port
+    properties:
+      network_id: { get_param: storage_frontend_network }
+      port_security_enabled: false
+      fixed_ips:
+        - subnet: { get_param: storage_frontend_subnet }
+          ip_address: { get_param: storage_frontend_network_gateway }
+
   router:
     type: OS::Neutron::Router
     properties:
@@ -58,6 +96,18 @@
     properties:
       router: { get_resource: router }
       subnet: { get_resource: subnet }
+  private_floating_router_iface:
+    type: OS::Neutron::RouterInterface
+    condition: {not: cond_extra_routes}
+    properties:
+      router: { get_resource: router }
+      port: { get_resource: private_floating_router_port }
+  public_storage_iface:
+    type: OS::Neutron::RouterInterface
+    properties:
+      router: { get_resource: router }
+      port: { get_resource: public_storage_router_port }
+
   extra_routes:
     type: OS::Neutron::ExtraRoute
     condition: cond_extra_routes
diff --git a/de/heat-templates/fragments/NetworkAccVMStorage.yaml b/de/heat-templates/fragments/NetworkAccVMStorage.yaml
index f1475b3..1d3a4ea 100644
--- a/de/heat-templates/fragments/NetworkAccVMStorage.yaml
+++ b/de/heat-templates/fragments/NetworkAccVMStorage.yaml
@@ -5,6 +5,10 @@
     type: string
   storage_frontend_network_cidr:
     type: string
+  storage_frontend_network_ipam_pool_start:
+    type: string
+  storage_frontend_network_ipam_pool_end:
+    type: string
 
 resources:
 
@@ -26,6 +30,9 @@
       network: { get_resource: storage_frontend_network }
       enable_dhcp: false
       cidr: { get_param: storage_frontend_network_cidr }
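+      # Keep the gateway address (claimed by the router port in
+      # NetworkAccVM.yaml) out of the IPAM allocation pool.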
+      allocation_pools:
+        - start: { get_param: storage_frontend_network_ipam_pool_start }
+          end: { get_param: storage_frontend_network_ipam_pool_end }
       gateway_ip: ~
 
 outputs:
diff --git a/de/heat-templates/fragments/vmx-components/bridges/bridges_internal.yaml b/de/heat-templates/fragments/vmx-components/bridges/bridges_internal.yaml
index 4e203bf..684b1a4 100644
--- a/de/heat-templates/fragments/vmx-components/bridges/bridges_internal.yaml
+++ b/de/heat-templates/fragments/vmx-components/bridges/bridges_internal.yaml
@@ -18,6 +18,9 @@
       network_id: {get_resource: bridge_net_re_pfe}
       enable_dhcp: False
       gateway_ip: 128.0.0.3
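+      # Presumably pins IPAM to the top of the range so addresses below
+      # .250 stay free for statically configured vMX components.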
+      allocation_pools:
+        - start: 128.0.0.250
+          end: 128.0.0.254
     type: OS::Neutron::Subnet
 
 outputs:
diff --git a/de/heat-templates/scripts/functions.sh b/de/heat-templates/scripts/functions.sh
index f3d5e39..4073b15 100644
--- a/de/heat-templates/scripts/functions.sh
+++ b/de/heat-templates/scripts/functions.sh
@@ -18,7 +18,6 @@
 TUNNEL_INTERFACE=$(ip -o addr show |grep -w ${TUNNEL_INTERFACE_IP}/${TUNNEL_INTERFACE_NETWORK_NETMASK} | awk '{print $2}')
 IRONIC_BAREMETAL_NETWORK_PREFIX=$(sed 's/[0-9]*\/[0-9]*$//' <<< $IRONIC_BAREMETAL_NETWORK)
 IRONIC_BAREMETAL_TUNNEL_NETWORK_PREFIX=$(sed 's/[0-9]*\/[0-9]*$//' <<< $IRONIC_BAREMETAL_TUNNEL_NETWORK)
-STORAGE_FRONTEND_NETWORK_NETMASK=$(echo ${STORAGE_FRONTEND_NETWORK} | cut -d'/' -f2)
 DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL:-10.10.1.0/16}
 # DOCKER_DEFAULT_ADDRESS_SIZE have to be less then netmask in DOCKER_DEFAULT_ADDRESS_POOL because
 # to the fact that actual netmask for docker_gwbridge is given from it
@@ -56,7 +55,6 @@
 OS_CODENAME=$(lsb_release -c -s)
 NODE_DEPLOYMENT_RETRIES=${NODE_DEPLOYMENT_RETRIES:-15}
 FLOATING_NETWORK_PREFIXES=${FLOATING_NETWORK_PREFIXES:-10.11.12.0/24}
-PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-ens4}
 UCP_MASTER_HOST=${UCP_MASTER_HOST:-${CONTROL_IP_ADDRESS}}
 UCP_IP_ADDRESS=${UCP_IP_ADDRESS:-$CONTROL_IP_ADDRESS}
 UCP_AUDIT_LOG_LEVEL=${UCP_AUDIT_LOG_LEVEL:-''}
@@ -729,10 +727,10 @@
     fi
 fi
 
-    public_address_match_ip_line=$(grep -nm1 "${PUBLIC_NODE_IP_ADDRESS}/${PUBLIC_NODE_IP_NETMASK}" ${cloud_netplan_cfg} | cut -d: -f1)
-    if [ -n "${public_address_match_ip_line}" ] ; then
-        sed -i "$((${public_address_match_ip_line}-1)),$((${public_address_match_ip_line}))d" ${cloud_netplan_cfg}
-    fi
+public_address_match_ip_line=$(grep -nm1 "${PUBLIC_NODE_IP_ADDRESS}/${PUBLIC_NODE_IP_NETMASK}" ${cloud_netplan_cfg} | cut -d: -f1)
+if [ -n "${public_address_match_ip_line}" ] ; then
+    sed -i "$((${public_address_match_ip_line}-1)),$((${public_address_match_ip_line}))d" ${cloud_netplan_cfg}
+fi
 
 cat << EOF >> ${cloud_netplan_cfg}
     bridges:
@@ -741,11 +739,13 @@
             interfaces:
             - ${PUBLIC_INTERFACE}
             - veth-br
+EOF
+if [[ ${NODE_METADATA} == *"tempest"* ]]; then
+cat << EOF >> ${cloud_netplan_cfg}
             addresses:
             - ${PUBLIC_NODE_IP_ADDRESS}/${PUBLIC_NODE_IP_NETMASK}
 EOF
 # Assign more ips for neutron dynamic routing PRODX-31417
-if [[ ${NODE_METADATA} == *"tempest"* ]]; then
     for i in {71..76}; do
 cat << EOF >> ${cloud_netplan_cfg}
             - ${PUBLIC_NODE_IP_ADDRESS%.*}.${i}/${PUBLIC_NODE_IP_NETMASK}
@@ -973,6 +973,8 @@
     netplan --debug apply
     # NOTE(vsaienko): give some time to apply changes
     sleep 15
+    # workaround for https://github.com/systemd/systemd/issues/13432
+    systemctl restart systemd-resolved
 }
 
 function install_frr {
diff --git a/de/heat-templates/scripts/launch.sh b/de/heat-templates/scripts/launch.sh
index 21bd205..0b859c9 100644
--- a/de/heat-templates/scripts/launch.sh
+++ b/de/heat-templates/scripts/launch.sh
@@ -140,7 +140,6 @@
             install_kubectl
             configure_ntp
             configure_atop
-            workaround_default_forward_policy
             if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
                 install_docker
                 swarm_init
@@ -187,7 +186,6 @@
             install_kubectl
             configure_ntp
             configure_atop
-            workaround_default_forward_policy
             if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
                 install_docker
                 cache_images
@@ -227,7 +225,6 @@
             enable_iscsi
             configure_ntp
             configure_atop
-            workaround_default_forward_policy
             if [[ "${KUBERNETES_INSTALLER}" == "ucp" ]]; then
                 install_docker
                 cache_images
@@ -269,7 +266,6 @@
                 cache_images
                 download_bundles
             fi
-            workaround_default_forward_policy
             configure_contrack
             disable_iptables_for_bridges
             install_frr
diff --git a/de/heat-templates/top.yaml b/de/heat-templates/top.yaml
index ee7c964..d179f8c 100644
--- a/de/heat-templates/top.yaml
+++ b/de/heat-templates/top.yaml
@@ -112,6 +112,15 @@
   storage_frontend_network_cidr:
     type: string
     default: '10.12.0.0/24'
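+  # Gateway and allocation pool for the storage frontend subnet; the env
+  # files above override these to match their 10.12.1.0/24 frontend CIDR.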
+  storage_frontend_network_gateway:
+    type: string
+    default: '10.12.0.1'
+  storage_frontend_network_ipam_pool_start:
+    type: string
+    default: '10.12.0.3'
+  storage_frontend_network_ipam_pool_end:
+    type: string
+    default: '10.12.0.254'
   storage_frontend_interface:
     type: string
     default: 'ens5'
@@ -413,6 +422,9 @@
   k0s_version:
     type: string
     default: ''
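+  # CIDR routed to the first tsrv node via the k8s_external_service_route
+  # resource defined below.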
+  external_k8s_service_network_cidr:
+    type: string
+    default: '10.172.1.0/24'
 
 conditions:
   aio_deploy:
@@ -453,17 +465,6 @@
       name: { get_attr: [keypair_name, value] }
       public_key: { get_param: cluster_public_key }
       save_private_key: false
-  accessible_network:
-    type: MCP2::NetworkAcc
-    properties:
-      public_net_id: { get_param: public_net_id }
-      control_network_cidr: { get_param: control_network_cidr }
-      dns_nameservers: { get_param: dns_nameservers }
-      control_network_host_routes: { get_param: control_network_host_routes }
-      tungstenfabric_enabled: { get_param: tungstenfabric_enabled }
-      vsrx_enabled: { get_param: vsrx_enabled }
-      control_network_ext_router_ip: { get_param: control_network_ext_router_ip }
-      private_floating_network_cidr: { get_param: private_floating_network_cidr }
 
   tun_network:
     type: MCP2::NetworkTun
@@ -480,6 +481,35 @@
       private_floating_network_ipam_pool_start: { get_param: private_floating_network_ipam_pool_start }
       private_floating_network_ipam_pool_end: { get_param: private_floating_network_ipam_pool_end }
 
+  storage_network:
+    type: MCP2::NetworkAccStorage
+    properties:
+      storage_frontend_network_cidr: { get_param: storage_frontend_network_cidr }
+      storage_backend_network_cidr: { get_param: storage_backend_network_cidr }
+      storage_frontend_network_ipam_pool_start: { get_param: storage_frontend_network_ipam_pool_start }
+      storage_frontend_network_ipam_pool_end: { get_param: storage_frontend_network_ipam_pool_end }
+
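+  # accessible_network now depends on both networks above because its
+  # router claims gateway ports on them (see NetworkAccVM.yaml).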
+  accessible_network:
+    depends_on:
+      - private_floating_network
+      - storage_network
+    type: MCP2::NetworkAcc
+    properties:
+      public_net_id: { get_param: public_net_id }
+      control_network_cidr: { get_param: control_network_cidr }
+      dns_nameservers: { get_param: dns_nameservers }
+      control_network_host_routes: { get_param: control_network_host_routes }
+      tungstenfabric_enabled: { get_param: tungstenfabric_enabled }
+      vsrx_enabled: { get_param: vsrx_enabled }
+      control_network_ext_router_ip: { get_param: control_network_ext_router_ip }
+      private_floating_network: {get_attr: [private_floating_network, private_floating_network_id]}
+      private_floating_network_cidr: { get_param: private_floating_network_cidr }
+      private_floating_network_gateway: { get_param: private_floating_network_gateway }
+      private_floating_subnet: {get_attr: [private_floating_network, private_floating_subnet_id]}
+      storage_frontend_network: {get_attr: [storage_network, storage_frontend_network_id]}
+      storage_frontend_network_gateway: { get_param: storage_frontend_network_gateway }
+      storage_frontend_subnet: {get_attr: [storage_network, storage_frontend_subnet_id]}
+
   vmx:
     depends_on:
       - private_floating_network
@@ -527,12 +557,6 @@
       vsrx_flavor: { get_param: vsrx_flavor }
       public_net_id: { get_param: public_net_id }
 
-  storage_network:
-    type: MCP2::NetworkAccStorage
-    properties:
-      storage_frontend_network_cidr: { get_param: storage_frontend_network_cidr }
-      storage_backend_network_cidr: { get_param: storage_backend_network_cidr }
-
   ironic_baremetal_network:
     type: MCP2::NetworkIronicFlat
     properties:
@@ -544,7 +568,6 @@
   ucp:
     depends_on:
      - accessible_network
-     - storage_network
     type: MCP2::SrvInstances
     properties:
       metadata: { get_param: ucp_metadata}
@@ -979,6 +1002,7 @@
           hardware_metadata: { get_param: hardware_metadata}
           frr_bgp_neighbors: { list_join: [',', {get_attr: [workers, server_tun_ip]}] }
           availability_zone: { get_param: availability_zone }
+          devops_utils_refspec: { get_param: devops_utils_refspec }
 
   ntws:
     type: OS::Heat::ResourceGroup
@@ -1125,6 +1149,16 @@
           kubectl_version: { get_param: kubectl_version }
           devops_utils_refspec: { get_param: devops_utils_refspec }
 
+  k8s_external_service_route:
+    type: OS::Neutron::ExtraRoute
+    depends_on:
+     - tsrvs
+    condition: { not: aio_deploy }
+    properties:
+      destination: { get_param: external_k8s_service_network_cidr }
+      nexthop: { get_attr: [tsrvs, resource.0, server_private_ip ] }
+      router_id: { get_attr: [accessible_network, accessible_router] }
+
 outputs:
   ucp_ips:
     description: Private IP addresses of the deployed ucp instances
diff --git a/hco/env/ctrl1-wrkr3.yaml b/hco/env/ctrl1-wrkr3.yaml
new file mode 100644
index 0000000..ef0145b
--- /dev/null
+++ b/hco/env/ctrl1-wrkr3.yaml
@@ -0,0 +1,30 @@
+resource_registry:
+  "VMInstances": ../fragments/VMInstance.yaml
+  "VMInstancesCeph": ../fragments/VMInstanceCeph.yaml
+
+parameters:
+  controllers_size: 1
+  workers_size: 3
+  image: jammy-server-cloudimg-amd64-20240417
+  public_net_id: c3799996-dc8e-4477-a309-09ea6dd71946
+  cluster_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
+  worker_metadata: {"labels": {"role":"ceph-osd-node"}}
+  workers_flavor: 'system.compact.openstack.control.ephemeral'
+  hardware_metadata: |
+    '00:00:00:00:00:00':
+      write_files:
+        - path: /usr/share/metadata/ceph.yaml
+          content: |
+            storageDevices:
+              - name: vdb
+                role: hdd
+                sizeGb: 20
+            ramGb: 8
+            cores: 2
+            # The roles will be assigned based on node labels.
+            # roles:
+            #   - mon
+            #   - mgr
+            ips:
+              - 192.168.122.101
+            crushPath: {}
diff --git a/hco/fragments/VMInstance.yaml b/hco/fragments/VMInstance.yaml
new file mode 100644
index 0000000..15dad90
--- /dev/null
+++ b/hco/fragments/VMInstance.yaml
@@ -0,0 +1,100 @@
+heat_template_version: queens
+
+parameters:
+
+  k8s_network:
+    type: string
+  k8s_subnet_id:
+    type: string
+  public_net_id:
+    type: string
+  data_network:
+    type: string
+  storage_frontend_network:
+    type: string
+  availability_zone:
+    type: string
+    default: nova
+  boot_timeout:
+    type: number
+    description: Boot timeout for instance
+    default: 600
+  image:
+    type: string
+    description: Name of image to use for servers
+  flavor:
+    type: string
+    description: Flavor to use for servers
+  key_name:
+    type: string
+    description: Name of keypair to assign to servers
+
+resources:
+
+  k8s_network_port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_param: k8s_network }
+      port_security_enabled: false
+      fixed_ips:
+        - subnet: { get_param: k8s_subnet_id }
+
+  floating_ip_k8s_net:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network_id: { get_param: public_net_id }
+      port_id: { get_resource: k8s_network_port }
+
+  wait_handle:
+    type: OS::Heat::WaitConditionHandle
+
+  wait_condition:
+    type: OS::Heat::WaitCondition
+    properties:
+      handle: { get_resource: wait_handle }
+      timeout: { get_param: boot_timeout }
+
+  vm_server:
+    type: OS::Nova::Server
+    properties:
+      availability_zone: { get_param: availability_zone }
+      image: { get_param: image }
+      flavor: { get_param: flavor }
+      key_name: { get_param: key_name }
+      networks:
+        - port: { get_resource: k8s_network_port }
+        - network: { get_param : storage_frontend_network }
+        - network: { get_param : data_network }
+      user_data_format: RAW
+      user_data:
+        str_replace:
+          template: |
+            #!/bin/bash
+            
+            set -x
+                        
+            STATUS="SUCCESS"
+            REASON="The node has been successfully deployed"
+            DATA_BINARY="{\"status\": \"$STATUS\", \"reason\": \"$REASON\"}"
+            echo "Sending notification to wait condition ..."
+            
+            WC_EXIT_CODE=1
+            counter=0
+            while (( ${WC_EXIT_CODE} != 0 && ${counter} < 3 )); do
+                wc_notify -k --data-binary "$DATA_BINARY" && WC_EXIT_CODE=0
+                counter=$((counter + 1))
+            sleep 5
+            done
+            
+            if (( ${WC_EXIT_CODE} !=0 ))
+            then
+                echo "Cannot send notification to wait condition with a SUCCESS status"
+                exit 1
+            fi
+          params:
+            wc_notify: { get_attr: [wait_handle, curl_cli] }
+
+outputs:
+  server_public_ip:
+    description: Floating IP address of server in public network
+    value: { get_attr: [ floating_ip_k8s_net, floating_ip_address ] }
diff --git a/hco/fragments/VMInstanceCeph.yaml b/hco/fragments/VMInstanceCeph.yaml
new file mode 100644
index 0000000..199d10d
--- /dev/null
+++ b/hco/fragments/VMInstanceCeph.yaml
@@ -0,0 +1,163 @@
+heat_template_version: queens
+
+parameters:
+
+  k8s_network:
+    type: string
+  k8s_subnet_id:
+    type: string
+  public_net_id:
+    type: string
+  data_network:
+    type: string
+  storage_frontend_network:
+    type: string
+  storage_backend_network:
+    type: string
+  availability_zone:
+    type: string
+    default: nova
+  boot_timeout:
+    type: number
+    description: Boot timeout for instance
+    default: 600
+  image:
+    type: string
+    description: Name of image to use for servers
+  flavor:
+    type: string
+    description: Flavor to use for servers
+  key_name:
+    type: string
+    description: Name of keypair to assign to servers
+  metadata:
+    type: json
+    default: {}
+  hardware_metadata:
+    description: The content of lab metadata.
+    type: string
+  user_data_config:
+    description: Part of cloud-config that prevents the drive labeled ephemeral0 from being mounted to /mnt
+    type: string
+    default: |
+      #cloud-config
+      #
+      # Don't mount ephemeral0 to /mnt as it's by default
+      mounts:
+        - [ ephemeral0, null ]
+
+resources:
+
+  k8s_network_port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_param: k8s_network }
+      port_security_enabled: false
+      fixed_ips:
+        - subnet: { get_param: k8s_subnet_id }
+
+  floating_ip_k8s_net:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network_id: { get_param: public_net_id }
+      port_id: { get_resource: k8s_network_port }
+
+  software_config:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: ungrouped
+      config:
+        str_replace:
+          template: |
+            #!/bin/bash
+
+            set -x
+
+            # Render the node metadata injected via inject_files below.
+            /usr/sbin/prepare-metadata.py --metadata-file /usr/share/metadata/lab-metadata.yaml
+
+            # node_metadata is substituted with the node labels by str_replace;
+            # on ceph-osd nodes, report ceph.yaml back through the wait
+            # condition and wipe the OSD drive so it can be provisioned cleanly.
+            HW_METADATA='{}'
+            if [[ -f /usr/share/metadata/ceph.yaml && 'node_metadata' == *"ceph-osd-node"* ]]; then
+                HW_METADATA="{\"ceph\": {\"$(hostname)\": \"$(base64 -w 0 /usr/share/metadata/ceph.yaml)\"}}"
+                ceph_store_drive=$(cat /usr/share/metadata/ceph.yaml | egrep '\- name\: vd?' | awk '{print $3}')
+                if [[ -b /dev/${ceph_store_drive} ]]; then
+                    sgdisk --zap-all /dev/${ceph_store_drive}
+                fi
+            fi
+
+            apt install nfs-common -y
+
+            STATUS="SUCCESS"
+            REASON="The node has been successfully deployed"
+            DATA_BINARY="{\"status\": \"$STATUS\", \"reason\": \"$REASON\", \"data\": $HW_METADATA}"
+            echo "Sending notification to wait condition with data: $HW_METADATA"
+
+            WC_EXIT_CODE=1
+            counter=0
+            while (( ${WC_EXIT_CODE} != 0 && ${counter} < 3 )); do
+                wc_notify -k --data-binary "$DATA_BINARY" && WC_EXIT_CODE=0
+                counter=$((counter + 1))
+                sleep 5
+            done
+
+            if (( ${WC_EXIT_CODE} != 0 ))
+            then
+                echo "Cannot send notification to wait condition with a SUCCESS status"
+                exit 1
+            fi
+          params:
+            wc_notify: { get_attr: [wait_handle, curl_cli] }
+            node_metadata: { get_param: metadata }
+
+  inject_files:
+    type: "OS::Heat::CloudConfig"
+    properties:
+      cloud_config:
+        write_files:
+          - path: /usr/sbin/prepare-metadata.py
+            owner: "root:root"
+            permissions: "0755"
+            content: {get_file: ../../de/heat-templates/scripts/prepare-metadata.py}
+          - path: /usr/share/metadata/lab-metadata.yaml
+            owner: "root:root"
+            permissions: "0644"
+            content: { get_param: hardware_metadata}
+
+  install_config_agent:
+    type: "OS::Heat::MultipartMime"
+    properties:
+      parts:
+      - config: {get_resource: software_config}
+      - config: {get_resource: inject_files}
+      - config: {get_param: user_data_config}
+
+  wait_handle:
+    type: OS::Heat::WaitConditionHandle
+
+  wait_condition:
+    type: OS::Heat::WaitCondition
+    properties:
+      handle: {get_resource: wait_handle}
+      timeout: { get_param: boot_timeout }
+
+  vm_server:
+    type: OS::Nova::Server
+    properties:
+      availability_zone: { get_param: availability_zone }
+      image: { get_param: image }
+      flavor: { get_param: flavor }
+      key_name: { get_param: key_name }
+      networks:
+        - port: { get_resource: k8s_network_port }
+        - network: { get_param : storage_frontend_network }
+        - network: { get_param : storage_backend_network }
+        - network: { get_param : data_network }
+      user_data_format: SOFTWARE_CONFIG
+      user_data: { get_resource:  install_config_agent}
+
+outputs:
+  server_public_ip:
+    description: Floating IP address of server in public network
+    value: { get_attr: [ floating_ip_k8s_net, floating_ip_address ] }
+  wc_data:
+    description: Metadata from instance
+    value: { get_attr: [wait_condition, data]}
diff --git a/hco/top.yaml b/hco/top.yaml
new file mode 100644
index 0000000..e8c4929
--- /dev/null
+++ b/hco/top.yaml
@@ -0,0 +1,187 @@
+heat_template_version: queens
+
+parameters:
+  controllers_size:
+    type: number
+    description: Number of controller instances to deploy
+    default: 1
+  workers_size:
+    type: number
+    description: Number of workers to deploy
+    default: 3
+  image:
+    type: string
+    description: Name of image to use for servers
+  availability_zone:
+    type: string
+    default: "nova"
+  masters_flavor:
+    type: string
+    default: 'system.compact.openstack.control'
+  workers_flavor:
+    type: string
+    default: 'system.compact.openstack.control'
+  cluster_public_key:
+    type: string
+  public_net_id:
+    type: string
+    default: ''
+    description: >
+      UUID of public network
+  k8s_network_cidr:
+    type: string
+    description: The CIDR of k8s network
+    default: '10.10.0.0/24'
+  data_network_cidr:
+    type: string
+    description: The CIDR of data network
+    default: '10.11.0.0/24'
+  storage_backend_network_cidr:
+    type: string
+    default: '10.12.0.0/24'
+  storage_frontend_network_cidr:
+    type: string
+    default: '10.12.1.0/24'
+  dns_nameservers:
+    type: json
+    default: ['172.18.224.6', '172.18.176.6']
+  hardware_metadata:
+    description: The content of lab metadata.
+    default: ''
+    type: string
+  worker_metadata:
+    type: json
+    default: {}
+  boot_timeout:
+    type: number
+    description: Boot timeout for instance
+    default: 600
+
+resources:
+
+  keypair_name:
+    type: OS::Heat::RandomString
+    properties:
+      character_classes: [{"class": "hexdigits", "min": 1}]
+      length: 128
+      salt: constant
+  key_pair:
+    type: OS::Nova::KeyPair
+    properties:
+      name: { get_attr: [keypair_name, value] }
+      public_key: { get_param: cluster_public_key }
+      save_private_key: false
+
+  k8s_network:
+    type: OS::Neutron::Net
+  k8s_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network: { get_resource: k8s_network }
+      enable_dhcp: false
+      cidr: { get_param: k8s_network_cidr }
+      dns_nameservers: { get_param: dns_nameservers }
+  router:
+    type: OS::Neutron::Router
+    properties:
+      external_gateway_info:
+        network: { get_param: public_net_id }
+  public_router_iface:
+    type: OS::Neutron::RouterInterface
+    properties:
+      router: { get_resource: router }
+      subnet: { get_resource: k8s_subnet }
+
+  data_network:
+    type: OS::Neutron::Net
+  data_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network: { get_resource: data_network }
+      enable_dhcp: false
+      cidr: { get_param: data_network_cidr }
+      gateway_ip: ~
+
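+  # The data and storage networks are isolated L2 segments: DHCP is disabled
+  # and no gateway is set, so instances configure addresses themselves.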
+  storage_backend_network:
+    type: OS::Neutron::Net
+  storage_backend_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network: { get_resource: storage_backend_network }
+      enable_dhcp: false
+      cidr: { get_param: storage_backend_network_cidr }
+      gateway_ip: ~
+
+  storage_frontend_network:
+    type: OS::Neutron::Net
+  storage_frontend_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network: { get_resource: storage_frontend_network }
+      enable_dhcp: false
+      cidr: { get_param: storage_frontend_network_cidr }
+      gateway_ip: ~
+
+  masters:
+    type: OS::Heat::ResourceGroup
+    depends_on:
+      - k8s_network
+      - data_network
+      - public_router_iface
+    properties:
+      count: { get_param: controllers_size }
+      resource_def:
+        type: VMInstances
+        properties:
+          k8s_network: { get_resource: k8s_network }
+          k8s_subnet_id: { get_resource: k8s_subnet }
+          public_net_id: { get_param: public_net_id }
+          storage_frontend_network: { get_resource: storage_frontend_network }
+          data_network: { get_resource: data_network }
+          availability_zone: { get_param: availability_zone }
+          image: { get_param: image }
+          flavor: { get_param: masters_flavor }
+          key_name: { get_attr: [keypair_name, value] }
+          boot_timeout: { get_param: boot_timeout }
+
+  workers:
+    type: OS::Heat::ResourceGroup
+    depends_on:
+      - k8s_network
+      - data_network
+      - public_router_iface
+    properties:
+      count: { get_param: workers_size }
+      resource_def:
+        type: VMInstancesCeph
+        properties:
+          k8s_network: { get_resource: k8s_network }
+          k8s_subnet_id: { get_resource: k8s_subnet }
+          public_net_id: { get_param: public_net_id }
+          storage_frontend_network: { get_resource: storage_frontend_network }
+          storage_backend_network: { get_resource: storage_backend_network }
+          data_network: { get_resource: data_network }
+          availability_zone: { get_param: availability_zone }
+          image: { get_param: image }
+          flavor: { get_param: workers_flavor }
+          key_name: { get_attr: [keypair_name, value] }
+          metadata: { get_param: worker_metadata }
+          hardware_metadata: { get_param: hardware_metadata}
+          boot_timeout: { get_param: boot_timeout }
+
+outputs:
+  masters_ips:
+    description: Public IP addresses of the deployed master instances
+    value: { get_attr: [masters, server_public_ip] }
+  workers_ips:
+    description: Public IP addresses of the deployed worker instances
+    value: { get_attr: [workers, server_public_ip] }
+  storage_frontend_network_cidr:
+    description: Storage network which is used as clientNet in Ceph CR
+    value: { get_param: storage_frontend_network_cidr }
+  storage_backend_network_cidr:
+    description: Storage network which is used as clusterNet in Ceph CR
+    value: { get_param: storage_backend_network_cidr }
+  workers_wc_data:
+    description: Metadata from workers
+    value: { get_attr: [workers, wc_data] }
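+
+# A minimal sketch of deploying this stack with the OpenStack CLI, assuming
+# the resource registry and parameters come from an env file such as
+# hco/env/ctrl1-wrkr3.yaml:
+#   openstack stack create -t hco/top.yaml -e hco/env/ctrl1-wrkr3.yaml hco-demo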