Merge "add support for automatic partitioning disks used as OSDs"
diff --git a/ceph/osd/setup.sls b/ceph/osd/setup.sls
index ab03da1..e028575 100644
--- a/ceph/osd/setup.sls
+++ b/ceph/osd/setup.sls
@@ -1,236 +1,11 @@
 {%- from "ceph/map.jinja" import osd, common with context %}
 
-ceph_osd_packages:
-  pkg.installed:
-  - names: {{ osd.pkgs }}
-
-/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf:
-  file.managed:
-  - source: salt://ceph/files/{{ common.version }}/ceph.conf.{{ grains.os_family }}
-  - template: jinja
-  - require:
-    - pkg: ceph_osd_packages
-
-{% set ceph_version = pillar.ceph.common.version %}
-
-{%- if osd.backend is defined %}
-
-{%- for backend_name, backend in osd.backend.iteritems() %}
-
-{%- for disk in backend.disks %}
-
-{%- if disk.get('enabled', True) %}
-
-{% set dev = '`readlink -f ' + disk.dev + '`' %}
-
-# for uniqueness
-{% set dev_device = dev + disk.get('data_partition_prefix', '') + disk.get('data_partition', 1)|string %}
-
-#{{ dev }}{{ disk.get('data_partition', 1) }}
-
-zap_disk_{{ dev_device }}:
-  cmd.run:
-  - name: "ceph-disk zap {{ dev }}"
-  - unless: "ceph-disk list | grep {{ dev }} | grep -e 'ceph' -e 'mounted'"
-  - require:
-    - pkg: ceph_osd_packages
-    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- endif %}
-
-{%- if disk.journal is defined %}
-
-zap_disk_journal_{{ disk.journal }}_for_{{ dev_device }}:
-  cmd.run:
-  - name: "ceph-disk zap {{ disk.journal }}"
-  - unless: "ceph-disk list | grep {{ disk.journal }} | grep -e 'ceph' -e 'mounted'"
-  - require:
-    - pkg: ceph_osd_packages
-    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
-    - cmd: zap_disk_{{ dev_device }}
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- endif %}
-
+include:
+{%- if osd.zap_disks is defined and osd.zap_disks == true %}
+- ceph.osd.setup.partitioning
 {%- endif %}
-
-{%- if disk.block_db is defined %}
-
-zap_disk_blockdb_{{ disk.block_db }}_for_{{ dev_device }}:
-  cmd.run:
-  - name: "ceph-disk zap {{ disk.block_db }}"
-  - unless: "ceph-disk list | grep {{ disk.block_db }} | grep -e 'ceph' -e 'mounted'"
-  - require:
-    - pkg: ceph_osd_packages
-    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
-    - cmd: zap_disk_{{ dev_device }}
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- endif %}
-
-{%- endif %}
-
-{%- if disk.block_wal is defined %}
-
-zap_disk_blockwal_{{ disk.block_wal }}_for_{{ dev_device }}:
-  cmd.run:
-  - name: "ceph-disk zap {{ disk.block_wal }}"
-  - unless: "ceph-disk list | grep {{ disk.block_wal }} | grep -e 'ceph' -e 'mounted'"
-  - require:
-    - pkg: ceph_osd_packages
-    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
-    - cmd: zap_disk_{{ dev_device }}
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- endif %}
-
-{%- endif %}
-
-{%- set cmd = [] %}
-{%- do cmd.append('--cluster ' + common.get('cluster_name', 'ceph')) %}
-{%- do cmd.append('--cluster-uuid ' + common.fsid) %}
-{%- if disk.get('dmcrypt', False) %}
-  {%- do cmd.append('--dmcrypt') %}
-  {%- do cmd.append('--dmcrypt-key-dir ' + disk.get('dmcrypt_key_dir', '/etc/ceph/dmcrypt-keys')) %}
-{%- endif %}
-{%- if disk.lockbox_partition is defined %}
-  {%- do cmd.append('--lockbox-partition-number ' + disk.lockbox_partition|string) %}
-{%- endif %}
-{%- do cmd.append("--prepare-key /etc/ceph/" + common.get('cluster_name', 'ceph') + ".client.bootstrap-osd.keyring") %}
-{%- if disk.data_partition is defined %}
-  {%- do cmd.append('--data-partition-number ' + disk.data_partition|string) %}
-{%- endif %}
-{%- if disk.data_partition_size is defined %}
-  {%- do cmd.append('--data-partition-size ' + disk.data_partition_size|string) %}
-{%- endif %}
-{%- if backend_name == 'bluestore' %}
-  {%- do cmd.append('--bluestore') %}
-  {%- if disk.block_partition is defined %}
-    {%- do cmd.append('--block-partition-number ' + disk.block_partition|string) %}
-  {%- endif %}
-  {%- if disk.block_db is defined %}
-    {%- if disk.block_db_dmcrypt is defined and not disk.block_db_dmcrypt %}
-      {%- do cmd.append('--block-db-non-dmcrypt') %}
-    {%- elif disk.get('block_db_dmcrypt', False) %}
-      {%- do cmd.append('--block-db-dmcrypt') %}
-    {%- endif %}
-    {%- if disk.block_db_partition is defined %}
-      {%- do cmd.append('--block-db-partition-number ' + disk.block_db_partition|string) %}
-    {%- endif %}
-  {%- do cmd.append('--block.db ' + disk.block_db) %}
-  {%- endif %}
-  {%- if disk.block_wal is defined %}
-    {%- if disk.block_wal_dmcrypt is defined and not disk.block_wal_dmcrypt %}
-      {%- do cmd.append('--block-wal-non-dmcrypt') %}
-    {%- elif disk.get('block_wal_dmcrypt', False) %}
-      {%- do cmd.append('--block-wal-dmcrypt') %}
-    {%- endif %}
-    {%- if disk.block_wal_partition is defined %}
-      {%- do cmd.append('--block-wal-partition-number ' + disk.block_wal_partition|string) %}
-    {%- endif %}
-    {%- do cmd.append('--block.wal ' + disk.block_wal) %}
-  {%- endif %}
-  {%- do cmd.append(dev) %}
-{%- elif backend_name == 'filestore' and ceph_version not in ['kraken', 'jewel'] %}
-  {%- if disk.journal_dmcrypt is defined and not disk.journal_dmcrypt %}
-    {%- do cmd.append('--journal-non-dmcrypt') %}
-  {%- elif disk.get('journal_dmcrypt', False) %}
-    {%- do cmd.append('--journal-dmcrypt') %}
-  {%- endif %}
-  {%- if disk.journal_partition is defined %}
-    {%- do cmd.append('--journal-partition-number ' + disk.journal_partition|string) %}
-  {%- endif %}
-  {%- do cmd.append('--filestore') %}
-  {%- do cmd.append(dev) %}
-  {%- if disk.journal is defined %}
-    {%- do cmd.append(disk.journal) %}
-  {%- endif %}
-{%- elif backend_name == 'filestore' %}
-  {%- if disk.journal_dmcrypt is defined and not disk.journal_dmcrypt %}
-    {%- do cmd.append('--journal-non-dmcrypt') %}
-  {%- elif disk.get('journal_dmcrypt', False) %}
-    {%- do cmd.append('--journal-dmcrypt') %}
-  {%- endif %}
-  {%- if disk.journal_partition is defined %}
-    {%- do cmd.append('--journal-partition-number ' + disk.journal_partition|string) %}
-  {%- endif %}
-  {%- do cmd.append(dev) %}
-  {%- if disk.journal is defined %}
-    {%- do cmd.append(disk.journal) %}
-  {%- endif %}
-{%- endif %}
-
-prepare_disk_{{ dev_device }}:
-  cmd.run:
-  - name: "yes | ceph-disk prepare {{ cmd|join(' ') }}"
-  - unless: "ceph-disk list | grep {{ dev_device }} | grep -e 'ceph' -e 'mounted'"
-  - require:
-    - cmd: zap_disk_{{ dev_device }}
-    - pkg: ceph_osd_packages
-    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- endif %}
-
-reload_partition_table_{{ dev_device }}:
-  cmd.run:
-  - name: "partprobe"
-  - unless: "lsblk -p | grep {{ dev_device }} -A1 | grep -v lockbox | grep {{ dev_device }} | grep ceph | grep osd"
-  - require:
-    - cmd: prepare_disk_{{ dev_device }}
-    - cmd: zap_disk_{{ dev_device }}
-    - pkg: ceph_osd_packages
-    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- else %}
-  - onlyif: ceph-disk list | grep {{ dev_device }} | grep ceph
-  {%- endif %}
-
-activate_disk_{{ dev_device }}:
-  cmd.run:
-{%- if disk.get('dmcrypt', False) %}
-  - name: "ceph-disk activate --dmcrypt --activate-key /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.bootstrap-osd.keyring {{ dev_device }}"
+{%- if osd.lvm_enabled is defined and osd.lvm_enabled == true %}
+- ceph.osd.setup.lvm
 {%- else %}
-  - name: "ceph-disk activate --activate-key /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.bootstrap-osd.keyring {{ dev_device }}"
+- ceph.osd.setup.disk
 {%- endif %}
-  - unless: "lsblk -p | grep {{ dev_device }} -A1 | grep -v lockbox | grep {{ dev_device }} | grep ceph | grep osd"
-  - require:
-    - cmd: prepare_disk_{{ dev_device }}
-    - cmd: zap_disk_{{ dev_device }}
-    - pkg: ceph_osd_packages
-    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- else %}
-  - onlyif: ceph-disk list | grep {{ dev_device }} | grep ceph
-  {%- endif %}
-
-{%- endif %}
-
-{%- endfor %}
-
-{%- endfor %}
-
-{%- endif %}
-
-osd_services_global:
-  service.running:
-  - enable: true
-  - names: ['ceph-osd.target']
-  - watch:
-    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- endif %}
-
-osd_services:
-  service.running:
-  - enable: true
-  - names: ['ceph.target']
-  - watch:
-    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- endif %}
diff --git a/ceph/osd/setup/disk.sls b/ceph/osd/setup/disk.sls
new file mode 100644
index 0000000..5b1a832
--- /dev/null
+++ b/ceph/osd/setup/disk.sls
@@ -0,0 +1,236 @@
+{%- from "ceph/map.jinja" import osd, common with context %}
+
+ceph_osd_packages:
+  pkg.installed:
+  - names: {{ osd.pkgs }}
+
+/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf:
+  file.managed:
+  - source: salt://ceph/files/{{ common.version }}/ceph.conf.{{ grains.os_family }}
+  - template: jinja
+  - require:
+    - pkg: ceph_osd_packages
+
+{% set ceph_version = pillar.ceph.common.version %}
+
+{%- if osd.backend is defined %}
+
+{%- for backend_name, backend in osd.backend.iteritems() %}
+
+{%- for disk in backend.disks %}
+
+{%- if disk.get('enabled', True) %}
+
+{% set dev = disk.dev %}
+
+# for uniqueness
+{% set dev_device = dev + disk.get('data_partition_prefix', '') + disk.get('data_partition', 1)|string %}
+
+#{{ dev }}{{ disk.get('data_partition', 1) }}
+
+zap_disk_{{ dev_device }}:
+  cmd.run:
+  - name: "ceph-disk zap {{ dev }}"
+  - unless: "ceph-disk list | grep {{ dev }} | grep -e 'ceph' -e 'mounted'"
+  - require:
+    - pkg: ceph_osd_packages
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
+
+{%- if disk.journal is defined %}
+
+zap_disk_journal_{{ disk.journal }}_for_{{ dev_device }}:
+  cmd.run:
+  - name: "ceph-disk zap {{ disk.journal }}"
+  - unless: "ceph-disk list | grep {{ disk.journal }} | grep -e 'ceph' -e 'mounted'"
+  - require:
+    - pkg: ceph_osd_packages
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
+    - cmd: zap_disk_{{ dev_device }}
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
+
+{%- endif %}
+
+{%- if disk.block_db is defined %}
+
+zap_disk_blockdb_{{ disk.block_db }}_for_{{ dev_device }}:
+  cmd.run:
+  - name: "ceph-disk zap {{ disk.block_db }}"
+  - unless: "ceph-disk list | grep {{ disk.block_db }} | grep -e 'ceph' -e 'mounted'"
+  - require:
+    - pkg: ceph_osd_packages
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
+    - cmd: zap_disk_{{ dev_device }}
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
+
+{%- endif %}
+
+{%- if disk.block_wal is defined %}
+
+zap_disk_blockwal_{{ disk.block_wal }}_for_{{ dev_device }}:
+  cmd.run:
+  - name: "ceph-disk zap {{ disk.block_wal }}"
+  - unless: "ceph-disk list | grep {{ disk.block_wal }} | grep -e 'ceph' -e 'mounted'"
+  - require:
+    - pkg: ceph_osd_packages
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
+    - cmd: zap_disk_{{ dev_device }}
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
+
+{%- endif %}
+
+{%- set cmd = [] %}
+{%- do cmd.append('--cluster ' + common.get('cluster_name', 'ceph')) %}
+{%- do cmd.append('--cluster-uuid ' + common.fsid) %}
+{%- if disk.get('dmcrypt', False) %}
+  {%- do cmd.append('--dmcrypt') %}
+  {%- do cmd.append('--dmcrypt-key-dir ' + disk.get('dmcrypt_key_dir', '/etc/ceph/dmcrypt-keys')) %}
+{%- endif %}
+{%- if disk.lockbox_partition is defined %}
+  {%- do cmd.append('--lockbox-partition-number ' + disk.lockbox_partition|string) %}
+{%- endif %}
+{%- do cmd.append("--prepare-key /etc/ceph/" + common.get('cluster_name', 'ceph') + ".client.bootstrap-osd.keyring") %}
+{%- if disk.data_partition is defined %}
+  {%- do cmd.append('--data-partition-number ' + disk.data_partition|string) %}
+{%- endif %}
+{%- if disk.data_partition_size is defined %}
+  {%- do cmd.append('--data-partition-size ' + disk.data_partition_size|string) %}
+{%- endif %}
+{%- if backend_name == 'bluestore' %}
+  {%- do cmd.append('--bluestore') %}
+  {%- if disk.block_partition is defined %}
+    {%- do cmd.append('--block-partition-number ' + disk.block_partition|string) %}
+  {%- endif %}
+  {%- if disk.block_db is defined %}
+    {%- if disk.block_db_dmcrypt is defined and not disk.block_db_dmcrypt %}
+      {%- do cmd.append('--block-db-non-dmcrypt') %}
+    {%- elif disk.get('block_db_dmcrypt', False) %}
+      {%- do cmd.append('--block-db-dmcrypt') %}
+    {%- endif %}
+    {%- if disk.block_db_partition is defined %}
+      {%- do cmd.append('--block-db-partition-number ' + disk.block_db_partition|string) %}
+    {%- endif %}
+  {%- do cmd.append('--block.db ' + disk.block_db) %}
+  {%- endif %}
+  {%- if disk.block_wal is defined %}
+    {%- if disk.block_wal_dmcrypt is defined and not disk.block_wal_dmcrypt %}
+      {%- do cmd.append('--block-wal-non-dmcrypt') %}
+    {%- elif disk.get('block_wal_dmcrypt', False) %}
+      {%- do cmd.append('--block-wal-dmcrypt') %}
+    {%- endif %}
+    {%- if disk.block_wal_partition is defined %}
+      {%- do cmd.append('--block-wal-partition-number ' + disk.block_wal_partition|string) %}
+    {%- endif %}
+    {%- do cmd.append('--block.wal ' + disk.block_wal) %}
+  {%- endif %}
+  {%- do cmd.append(dev) %}
+{%- elif backend_name == 'filestore' and ceph_version not in ['kraken', 'jewel'] %}
+  {%- if disk.journal_dmcrypt is defined and not disk.journal_dmcrypt %}
+    {%- do cmd.append('--journal-non-dmcrypt') %}
+  {%- elif disk.get('journal_dmcrypt', False) %}
+    {%- do cmd.append('--journal-dmcrypt') %}
+  {%- endif %}
+  {%- if disk.journal_partition is defined %}
+    {%- do cmd.append('--journal-partition-number ' + disk.journal_partition|string) %}
+  {%- endif %}
+  {%- do cmd.append('--filestore') %}
+  {%- do cmd.append(dev) %}
+  {%- if disk.journal is defined %}
+    {%- do cmd.append(disk.journal) %}
+  {%- endif %}
+{%- elif backend_name == 'filestore' %}
+  {%- if disk.journal_dmcrypt is defined and not disk.journal_dmcrypt %}
+    {%- do cmd.append('--journal-non-dmcrypt') %}
+  {%- elif disk.get('journal_dmcrypt', False) %}
+    {%- do cmd.append('--journal-dmcrypt') %}
+  {%- endif %}
+  {%- if disk.journal_partition is defined %}
+    {%- do cmd.append('--journal-partition-number ' + disk.journal_partition|string) %}
+  {%- endif %}
+  {%- do cmd.append(dev) %}
+  {%- if disk.journal is defined %}
+    {%- do cmd.append(disk.journal) %}
+  {%- endif %}
+{%- endif %}
+
+prepare_disk_{{ dev_device }}:
+  cmd.run:
+  - name: "yes | ceph-disk prepare {{ cmd|join(' ') }}"
+  - unless: "ceph-disk list | grep {{ dev_device }} | grep -e 'ceph' -e 'mounted'"
+  - require:
+    - cmd: zap_disk_{{ dev_device }}
+    - pkg: ceph_osd_packages
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
+
+reload_partition_table_{{ dev_device }}:
+  cmd.run:
+  - name: "partprobe"
+  - unless: "lsblk -p | grep {{ dev_device }} -A1 | grep -v lockbox | grep {{ dev_device }} | grep ceph | grep osd"
+  - require:
+    - cmd: prepare_disk_{{ dev_device }}
+    - cmd: zap_disk_{{ dev_device }}
+    - pkg: ceph_osd_packages
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- else %}
+  - onlyif: ceph-disk list | grep {{ dev_device }} | grep ceph
+  {%- endif %}
+
+activate_disk_{{ dev_device }}:
+  cmd.run:
+{%- if disk.get('dmcrypt', False) %}
+  - name: "ceph-disk activate --dmcrypt --activate-key /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.bootstrap-osd.keyring {{ dev_device }}"
+{%- else %}
+  - name: "ceph-disk activate --activate-key /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.bootstrap-osd.keyring {{ dev_device }}"
+{%- endif %}
+  - unless: "lsblk -p | grep {{ dev_device }} -A1 | grep -v lockbox | grep {{ dev_device }} | grep ceph | grep osd"
+  - require:
+    - cmd: prepare_disk_{{ dev_device }}
+    - cmd: zap_disk_{{ dev_device }}
+    - pkg: ceph_osd_packages
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- else %}
+  - onlyif: ceph-disk list | grep {{ dev_device }} | grep ceph
+  {%- endif %}
+
+{%- endif %}
+
+{%- endfor %}
+
+{%- endfor %}
+
+{%- endif %}
+
+osd_services_global:
+  service.running:
+  - enable: true
+  - names: ['ceph-osd.target']
+  - watch:
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
+
+osd_services:
+  service.running:
+  - enable: true
+  - names: ['ceph.target']
+  - watch:
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
diff --git a/ceph/osd/setup/partitioning.sls b/ceph/osd/setup/partitioning.sls
new file mode 100644
index 0000000..16ced44
--- /dev/null
+++ b/ceph/osd/setup/partitioning.sls
@@ -0,0 +1,80 @@
+{%- from "ceph/map.jinja" import osd, common with context %}
+
+{%- set devs = [] %}
+{%- set dbs = [] %}
+{%- set wals = [] %}
+{%- for backend_name, backend in osd.backend.iteritems() %}
+{%- for disk in backend.disks %}
+{%- set dev = disk.dev %}
+
+{%- if disk.block_db is defined %}
+{%- set db = disk.block_db %}
+{%- do dbs.append(db) %}
+{%- endif %}
+{%- if disk.block_wal is defined %}
+{%- set wal = disk.block_wal %}
+{%- do wals.append(wal) %}
+{%- endif %}
+{%- do devs.append(dev) %}
+{%- endfor %}
+{%- endfor %}
+{%- set end_size = {} %}
+{%- set counter = {} %}
+
+{%- if dbs != [] %}
+{%- for db in dbs | unique %}
+{%- do end_size.update({db: 1048576}) %}
+{%- do counter.update({db: 1}) %}
+create_disk_label_{{ db }}:
+  module.run:
+  - name: partition.mklabel
+  - device: {{ db }}
+  - label_type: gpt
+  - unless: "fdisk -l {{ db }} | grep -i 'Disklabel type: gpt'"
+{%- endfor %}
+{%- for db in dbs %}
+create_partition_{{ db }}_{{ counter[db] }}:
+  module.run:
+  - name: partition.mkpart
+  - device: {{ db }}
+  - part_type: primary
+  - start: {{ end_size[db] }}B
+  - end: {{ end_size[db] + osd.bluestore_block_db_size }}B
+  - size: {{ osd.bluestore_block_db_size }}B
+  - unless: "blkid {{ db }}{{ counter[db] }} {{ db }}p{{ counter[db] }}"
+  - require:
+    - module: create_disk_label_{{ db }}
+
+{%- do counter.update({db: counter[db] + 1}) %}
+{%- do end_size.update({db: end_size[db] + osd.bluestore_block_db_size + 1048576}) %}
+{%- endfor %}
+{%- endif %}
+
+{%- if wals != [] %}
+{%- for wal in wals | unique %}
+{%- do end_size.update({wal: 1048576}) %}
+{%- do counter.update({wal: 1}) %}
+create_disk_label_{{ wal }}:
+  module.run:
+  - name: partition.mklabel
+  - device: {{ wal }}
+  - label_type: gpt
+  - unless: "fdisk -l {{ wal }} | grep -i 'Disklabel type: gpt'"
+{%- endfor %}
+{%- for wal in wals %}
+create_partition_{{ wal }}_{{ counter[wal] }}:
+  module.run:
+  - name: partition.mkpart
+  - device: {{ wal }}
+  - part_type: primary
+  - start: {{ end_size[wal] }}B
+  - end: {{ end_size[wal] + osd.bluestore_block_wal_size }}B
+  - size: {{ osd.bluestore_block_wal_size }}B
+  - unless: "blkid {{ wal }}{{ counter[wal] }} {{ wal }}p{{ counter[wal] }}"
+  - require:
+    - module: create_disk_label_{{ wal }}
+
+{%- do counter.update({wal: counter[wal] + 1}) %}
+{%- do end_size.update({wal: end_size[wal] + osd.bluestore_block_wal_size + 1048576}) %}
+{%- endfor %}
+{%- endif %}