Fix cookied-mcp-pike-dvr-ceph templates

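- Drop the shared-backup-restore macros from ceph.yaml and openstack.yaml
- Replace the hand-written OpenStack install steps with shared-openstack macros
- Align backend_network_subnet and ceph_cluster_network with 10.167.4.0/24 and
  change backend_vlan from '30' to '10'
- Reduce the OSD layout to a single data disk (/dev/vdb) and a single
  journal/block DB disk (/dev/vdc), lower ceph_osd_block_db_size to '10' and
  add ceph_osd_journal_size: '10'
- Remove the dedicated storage-pool01 network and the single_storage_dhcp role
  from ceph/rgw nodes
- Rename HOSTNAME_OSD1/HOSTNAME_OSD2 to HOSTNAME_OSD01/HOSTNAME_OSD02 and use
  cmp1/cmp2 compute hostnames
- Add a temporary workaround in the model generator to lower pg_num/pgp_num
  in ceph/setup.yml
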
Change-Id: I8f5beb85f25876948690e6d690d42c9ab5d9f672
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/ceph.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/ceph.yaml
index 5f2a4ae..7b9df3a 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/ceph.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/ceph.yaml
@@ -4,8 +4,6 @@
 {% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import DOMAIN_NAME with context %}
 {% import 'shared-salt.yaml' as SHARED with context %}
 
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-
 # Install ceph mons
 - description: Update grains
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -167,5 +165,4 @@
   retry: {count: 2, delay: 5}
   skip_fail: false
 
-{{ BACKUP.MACRO_BACKUP_CEPH() }}
 {{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
index 7367667..1230c56 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
@@ -1,8 +1,8 @@
 default_context:
   auditd_enabled: 'False'
   backend_network_netmask: 255.255.255.0
-  backend_network_subnet: 10.167.7.0/24
-  backend_vlan: '30'
+  backend_network_subnet: 10.167.4.0/24
+  backend_vlan: '10'
   backup_private_key: |-
     -----BEGIN RSA PRIVATE KEY-----
     MIIEpQIBAAKCAQEAuY7v++mza4e75f80GYE2iIdZ30d7yvT6Xym00iD/OxRWNtXe
@@ -33,7 +33,7 @@
     -----END RSA PRIVATE KEY-----
   backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5ju/76bNrh7vl/zQZgTaIh1nfR3vK9PpfKbTSIP87FFY21d6siHszRffRDQXYPeFW8/PntrFOF44rfEVFW8+ghmObd9WDA3NCZPJ8BnPhM51O1bH1zJ7wGjbSqVGmrzAVAxIOGBG/QnWJfouMmyvykzUDyrd7gZie/QOssFnUirv5P+SrPOxPs+68a8Qrrcn8NoToEjvdAb7RbQy9uV13OfBe9cuj9WUFdqNY6/ftgeIwUxNlqnuVO8ZUTLEHJUQzTgGjsAEXY8Q/AasYv2c6knUtLEEJyvI1XJlu94UX97wDUKPXvysP5bgfmnYCCvkD/LctmlOTKGdO1ZfPBPdj
   bmk_enabled: 'False'
-  ceph_cluster_network: 10.167.7.0/24
+  ceph_cluster_network: 10.167.4.0/24
   ceph_enabled: 'True'
   ceph_hyper_converged: 'False'
   ceph_mon_node01_address: 10.167.4.66
@@ -43,15 +43,16 @@
   ceph_mon_node03_address: 10.167.4.68
   ceph_mon_node03_hostname: cmn03
   ceph_osd_backend: bluestore
-  ceph_osd_block_db_size: '20'
+  ceph_osd_block_db_size: '10'
   ceph_osd_bond_mode: active-backup
   ceph_osd_count: '2'
-  ceph_osd_data_disks: /dev/vdb,/dev/vdc
-  ceph_osd_journal_or_block_db_disks: /dev/vdd,/dev/vde
+  ceph_osd_data_disks: /dev/vdb
+  ceph_osd_journal_or_block_db_disks: /dev/vdc
   ceph_osd_node_count: '2'
+  ceph_osd_journal_size: '10'
   ceph_osd_primary_first_nic: eth1
   ceph_osd_primary_second_nic: eth2
-  ceph_osd_rack01_backend_subnet: 10.167.7
+  ceph_osd_rack01_backend_subnet: 10.167.4
   ceph_osd_rack01_hostname: osd
   ceph_osd_rack01_single_subnet: 10.167.4
   ceph_public_network: 10.167.4.0/24
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
index 6d343ba..9745cb9 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
@@ -7,175 +7,23 @@
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
 
 {% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
 
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
 
-# Install OpenStack control services
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
 
-- description: Install glance on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@glance:server' state.sls glance -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
 
-- description: Install keystone service (note that different fernet keys are created on different nodes)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 15}
-  skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
 
-- description: Restart apache due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
 
-- description: Check apache status to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
 
-- description: Mount glusterfs.client volumes (resuires created 'keystone' and 'glusterfs' system users)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glance:server' state.sls glusterfs.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
 
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Populate keystone services/tenants/admins
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:client' state.sls keystone.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check keystone service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check glance image-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; glance image-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install nova on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nova:controller' state.sls nova -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check nova service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; nova --debug service-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-
-- description: Install cinder
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@cinder:controller' state.sls cinder -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check cinder list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; cinder list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install neutron service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:server' state.sls neutron -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install neutron on gtw node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:gateway' state.sls neutron
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check neutron agent-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; neutron agent-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@heat:server' state.sls heat -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 10}
-  skip_fail: false
-
-
-- description: Deploy horizon dashboard
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@horizon:server' state.sls horizon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Deploy nginx proxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check IP on computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
-    'ip a'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 10, delay: 30}
-  skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE() }}
 
 - description: Create net04_external
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
@@ -232,7 +80,3 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
   skip_fail: false
-
-{{ BACKUP.MACRO_WR_NGINX_MASTER() }}
-{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
-{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
index d3bacce..cb6b03a 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
@@ -17,16 +17,16 @@
 {% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_RGW01 = os_env('HOSTNAME_CMN01', 'rgw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_RGW02 = os_env('HOSTNAME_CMN02', 'rgw02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_RGW03 = os_env('HOSTNAME_CMN03', 'rgw03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD1 = os_env('HOSTNAME_OSD1', 'osd1.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD2 = os_env('HOSTNAME_OSD2', 'osd2.' + DOMAIN_NAME) %}
+{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd2.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
 
@@ -47,8 +47,8 @@
             default_{{ HOSTNAME_CTL03 }}: +13
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_OSD1 }}: +94
-            default_{{ HOSTNAME_OSD2 }}: +95
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
             default_{{ HOSTNAME_CMN01 }}: +96
             default_{{ HOSTNAME_CMN02 }}: +97
             default_{{ HOSTNAME_CMN03 }}: +98
@@ -72,8 +72,8 @@
             default_{{ HOSTNAME_CTL03 }}: +13
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_OSD1 }}: +94
-            default_{{ HOSTNAME_OSD2 }}: +95
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
             default_{{ HOSTNAME_CMN01 }}: +96
             default_{{ HOSTNAME_CMN02 }}: +97
             default_{{ HOSTNAME_CMN03 }}: +98
@@ -97,8 +97,8 @@
             default_{{ HOSTNAME_CTL03 }}: +13
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_OSD1 }}: +94
-            default_{{ HOSTNAME_OSD2 }}: +95
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
             default_{{ HOSTNAME_CMN01 }}: +96
             default_{{ HOSTNAME_CMN02 }}: +97
             default_{{ HOSTNAME_CMN03 }}: +98
@@ -122,8 +122,8 @@
             default_{{ HOSTNAME_CTL03 }}: +13
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_OSD1 }}: +94
-            default_{{ HOSTNAME_OSD2 }}: +95
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
             default_{{ HOSTNAME_CMN01 }}: +96
             default_{{ HOSTNAME_CMN02 }}: +97
             default_{{ HOSTNAME_CMN03 }}: +98
@@ -135,31 +135,12 @@
           ip_ranges:
             dhcp: [+10, -10]
 
-      storage-pool01:
-        net: {{ os_env('STORAGE_ADDRESS_POOL01', '10.100.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_OSD1 }}: +94
-            default_{{ HOSTNAME_OSD2 }}: +95
-            default_{{ HOSTNAME_CMN01 }}: +96
-            default_{{ HOSTNAME_CMN02 }}: +97
-            default_{{ HOSTNAME_CMN03 }}: +98
-            default_{{ HOSTNAME_RGW01 }}: +76
-            default_{{ HOSTNAME_RGW02 }}: +77
-            default_{{ HOSTNAME_RGW03 }}: +78
-          ip_ranges:
-            dhcp: [+10, -10]
-
-
     groups:
       - name: default
         driver:
           name: devops.driver.libvirt
           params:
             connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
             stp: False
             hpet: False
             enable_acpi: true
@@ -171,7 +152,6 @@
           private: private-pool01
           tenant: tenant-pool01
           external: external-pool01
-          storage: storage-pool01
 
         l2_network_devices:
           private:
@@ -196,13 +176,6 @@
             forward:
               mode: route
 
-          storage:
-            address_pool: storage-pool01
-            dhcp: true
-            forward:
-              mode: route
-
-
         group_volumes:
          - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
            source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
@@ -430,7 +403,7 @@
               interfaces: *interfaces
               network_config: *network_config
 
-          - name: {{ HOSTNAME_OSD1 }}
+          - name: {{ HOSTNAME_OSD01 }}
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
@@ -462,7 +435,7 @@
               interfaces: *interfaces
               network_config: *network_config
 
-          - name: {{ HOSTNAME_OSD2 }}
+          - name: {{ HOSTNAME_OSD02 }}
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
index fd768d0..74a1465 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
@@ -104,8 +104,6 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-        ens5:
-          role: single_storage_dhcp
 
     cmn01.cookied-mcp-pike-dvr-ceph.local:
       reclass_storage_name: ceph_mon_node01
@@ -117,8 +115,6 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-        ens5:
-          role: single_storage_dhcp
 
     cmn02.cookied-mcp-pike-dvr-ceph.local:
       reclass_storage_name: ceph_mon_node02
@@ -130,8 +126,6 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-        ens5:
-          role: single_storage_dhcp
 
     cmn03.cookied-mcp-pike-dvr-ceph.local:
       reclass_storage_name: ceph_mon_node03
@@ -143,8 +137,6 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-        ens5:
-          role: single_storage_dhcp
 
     rgw01.cookied-mcp-pike-dvr-ceph.local:
       reclass_storage_name: ceph_rgw_node01
@@ -156,8 +148,6 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-        ens5:
-          role: single_storage_dhcp
 
     rgw02.cookied-mcp-pike-dvr-ceph.local:
       reclass_storage_name: ceph_rgw_node02
@@ -169,8 +159,6 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-        ens5:
-          role: single_storage_dhcp
 
     rgw03.cookied-mcp-pike-dvr-ceph.local:
       reclass_storage_name: ceph_rgw_node03
@@ -181,6 +169,4 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
-        ens5:
-          role: single_storage_dhcp
\ No newline at end of file
+          role: single_ctl
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
index 35dc2df..988f469 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
@@ -43,6 +43,14 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+- description: Temporary workaround to reduce pg_num/pgp_num in ceph/setup.yml (fix or debug later)
+  cmd: |
+    sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+    sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
 {{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}