Support multiple OSDs per device / initial OSD weight of 0
PROD-16922
PROD-17053
PROD-16923
Change-Id: I2e6e40767675a12ad0a596f4607226b8ab36d9ea
diff --git a/README.rst b/README.rst
index 9a10052..fd9a3be 100644
--- a/README.rst
+++ b/README.rst
@@ -314,22 +314,44 @@
- dev: /dev/sdm
enabled: false
journal: /dev/ssd
+ journal_partition: 5
+ data_partition: 6
+ lockbox_partition: 7
+ data_partition_size: 12000  # size in MB
class: bestssd
- weight: 1.5
+ weight: 1.666
dmcrypt: true
+ journal_dmcrypt: false
+ - dev: /dev/sdf
+ journal: /dev/ssd
+ journal_dmcrypt: true
+ class: bestssd
+ weight: 1.666
- dev: /dev/sdl
journal: /dev/ssd
class: bestssd
- weight: 1.5
+ weight: 1.666
bluestore:
disks:
- dev: /dev/sdb
+ - dev: /dev/sdf
+ block_db: /dev/ssd
+ block_wal: /dev/ssd
+ block_db_dmcrypt: true
+ block_wal_dmcrypt: true
- dev: /dev/sdc
block_db: /dev/ssd
block_wal: /dev/ssd
+ data_partition: 1
+ block_partition: 2
+ lockbox_partition: 5
+ block_db_partition: 3
+ block_wal_partition: 4
class: ssd
weight: 1.666
dmcrypt: true
+ block_db_dmcrypt: false
+ block_wal_dmcrypt: false
- dev: /dev/sdd
enabled: false
@@ -585,6 +607,17 @@
- type: host
name: osd001
+Add OSDs with specific weight
+-----------------------------
+
+Add OSD device(s) with the initial CRUSH weight explicitly set to a specific value.
+
+.. code-block:: yaml
+
+ ceph:
+ osd:
+ crush_initial_weight: 0
+
Apply CRUSH map
---------------
diff --git a/ceph/files/jewel/ceph.conf.Debian b/ceph/files/jewel/ceph.conf.Debian
index 9fe3a9d..1e4d5ea 100644
--- a/ceph/files/jewel/ceph.conf.Debian
+++ b/ceph/files/jewel/ceph.conf.Debian
@@ -82,6 +82,10 @@
crush location = {% for crush in osd.crush %}{{ crush.type }}={{ crush.name }}{% if not loop.last %} {% endif %}{% endfor %}
{%- endif %}
+{%- if osd.crush_initial_weight is defined %}
+osd crush initial weight = {{ osd.crush_initial_weight }}
+{%- endif %}
+
osd crush update on start = {{ osd.get('crush_update', 'true') }}
{%- if pillar.ceph.osd.journal_size is defined %}
diff --git a/ceph/files/kraken/ceph.conf.Debian b/ceph/files/kraken/ceph.conf.Debian
index 9fe3a9d..1e4d5ea 100644
--- a/ceph/files/kraken/ceph.conf.Debian
+++ b/ceph/files/kraken/ceph.conf.Debian
@@ -82,6 +82,10 @@
crush location = {% for crush in osd.crush %}{{ crush.type }}={{ crush.name }}{% if not loop.last %} {% endif %}{% endfor %}
{%- endif %}
+{%- if osd.crush_initial_weight is defined %}
+osd crush initial weight = {{ osd.crush_initial_weight }}
+{%- endif %}
+
osd crush update on start = {{ osd.get('crush_update', 'true') }}
{%- if pillar.ceph.osd.journal_size is defined %}
diff --git a/ceph/files/luminous/ceph.conf.Debian b/ceph/files/luminous/ceph.conf.Debian
index 9fe3a9d..1e4d5ea 100644
--- a/ceph/files/luminous/ceph.conf.Debian
+++ b/ceph/files/luminous/ceph.conf.Debian
@@ -82,6 +82,10 @@
crush location = {% for crush in osd.crush %}{{ crush.type }}={{ crush.name }}{% if not loop.last %} {% endif %}{% endfor %}
{%- endif %}
+{%- if osd.crush_initial_weight is defined %}
+osd crush initial weight = {{ osd.crush_initial_weight }}
+{%- endif %}
+
osd crush update on start = {{ osd.get('crush_update', 'true') }}
{%- if pillar.ceph.osd.journal_size is defined %}
diff --git a/ceph/osd/custom.sls b/ceph/osd/custom.sls
index 812dfc1..fb504a1 100644
--- a/ceph/osd/custom.sls
+++ b/ceph/osd/custom.sls
@@ -35,7 +35,7 @@
update_weight_disk_{{ dev }}:
cmd.run:
- name: "ceph osd crush reweight osd.{{ disk_id }} {{ disk.weight }}"
- - unless: "ceph osd tree | awk '{print $3,$4}' | grep -w osd.{{ disk_id }} | grep {{ disk.weight }}"
+ - unless: "ceph osd tree | awk '{print $2,$3,$4}' | grep -w osd.{{ disk_id }} | grep {{ disk.weight }}"
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}
diff --git a/ceph/osd/setup.sls b/ceph/osd/setup.sls
index cb772c8..de58cf3 100644
--- a/ceph/osd/setup.sls
+++ b/ceph/osd/setup.sls
@@ -13,6 +13,8 @@
{% set ceph_version = pillar.ceph.common.version %}
+{%- if osd.backend is defined %}
+
{%- for backend_name, backend in osd.backend.iteritems() %}
{%- for disk in backend.disks %}
@@ -21,7 +23,12 @@
{% set dev = disk.dev %}
-zap_disk_{{ dev }}:
+# for uniqueness
+{% set dev_device = dev + disk.get('data_partition', 1)|string %}
+
+#{{ dev }}{{ disk.get('data_partition', 1) }}
+
+zap_disk_{{ dev_device }}:
cmd.run:
- name: "ceph-disk zap {{ dev }}"
- unless: "ceph-disk list | grep {{ dev }} | grep ceph"
@@ -34,14 +41,14 @@
{%- if disk.journal is defined %}
-zap_disk_journal_{{ disk.journal }}_for_{{ dev }}:
+zap_disk_journal_{{ disk.journal }}_for_{{ dev_device }}:
cmd.run:
- name: "ceph-disk zap {{ disk.journal }}"
- unless: "ceph-disk list | grep {{ disk.journal }} | grep ceph"
- require:
- pkg: ceph_osd_packages
- file: /etc/ceph/ceph.conf
- - cmd: zap_disk_{{ dev }}
+ - cmd: zap_disk_{{ dev_device }}
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}
@@ -50,14 +57,14 @@
{%- if disk.block_db is defined %}
-zap_disk_blockdb_{{ disk.block_db }}_for_{{ dev }}:
+zap_disk_blockdb_{{ disk.block_db }}_for_{{ dev_device }}:
cmd.run:
- name: "ceph-disk zap {{ disk.block_db }}"
- unless: "ceph-disk list | grep {{ disk.block_db }} | grep ceph"
- require:
- pkg: ceph_osd_packages
- file: /etc/ceph/ceph.conf
- - cmd: zap_disk_{{ dev }}
+ - cmd: zap_disk_{{ dev_device }}
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}
@@ -66,14 +73,14 @@
{%- if disk.block_wal is defined %}
-zap_disk_blockwal_{{ disk.block_wal }}_for_{{ dev }}:
+zap_disk_blockwal_{{ disk.block_wal }}_for_{{ dev_device }}:
cmd.run:
- name: "ceph-disk zap {{ disk.block_wal }}"
- unless: "ceph-disk list | grep {{ disk.block_wal }} | grep ceph"
- require:
- pkg: ceph_osd_packages
- file: /etc/ceph/ceph.conf
- - cmd: zap_disk_{{ dev }}
+ - cmd: zap_disk_{{ dev_device }}
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}
@@ -82,68 +89,112 @@
{%- set cmd = [] %}
{%- if disk.get('dmcrypt', False) %}
-{%- do cmd.append('--dmcrypt') %}
-{%- do cmd.append('--dmcrypt-key-dir ' + disk.get('dmcrypt_key_dir', '/etc/ceph/dmcrypt-keys')) %}
+ {%- do cmd.append('--dmcrypt') %}
+ {%- do cmd.append('--dmcrypt-key-dir ' + disk.get('dmcrypt_key_dir', '/etc/ceph/dmcrypt-keys')) %}
+{%- endif %}
+{%- if disk.lockbox_partition is defined %}
+ {%- do cmd.append('--lockbox-partition-number ' + disk.lockbox_partition|string) %}
{%- endif %}
{%- do cmd.append('--prepare-key /etc/ceph/ceph.client.bootstrap-osd.keyring') %}
+{%- if disk.data_partition is defined %}
+ {%- do cmd.append('--data-partition-number ' + disk.data_partition|string) %}
+{%- endif %}
+{%- if disk.data_partition_size is defined %}
+ {%- do cmd.append('--data-partition-size ' + disk.data_partition_size|string) %}
+{%- endif %}
{%- if backend_name == 'bluestore' %}
-{%- do cmd.append('--bluestore') %}
-{%- if disk.block_db is defined %}
-{%- do cmd.append('--block.db ' + disk.block_db) %}
-{%- endif %}
-{%- if disk.block_wal is defined %}
-{%- do cmd.append('--block.wal ' + disk.block_wal) %}
-{%- endif %}
-{%- do cmd.append(dev) %}
+ {%- do cmd.append('--bluestore') %}
+ {%- if disk.block_partition is defined %}
+ {%- do cmd.append('--block-partition-number ' + disk.block_partition|string) %}
+ {%- endif %}
+ {%- if disk.block_db is defined %}
+ {%- if disk.block_db_dmcrypt is defined and not disk.block_db_dmcrypt %}
+ {%- do cmd.append('--block-db-non-dmcrypt') %}
+ {%- elif disk.get('block_db_dmcrypt', False) %}
+ {%- do cmd.append('--block-db-dmcrypt') %}
+ {%- endif %}
+ {%- if disk.block_db_partition is defined %}
+ {%- do cmd.append('--block-db-partition-number ' + disk.block_db_partition|string) %}
+ {%- endif %}
+ {%- do cmd.append('--block.db ' + disk.block_db) %}
+ {%- endif %}
+ {%- if disk.block_wal is defined %}
+ {%- if disk.block_wal_dmcrypt is defined and not disk.block_wal_dmcrypt %}
+ {%- do cmd.append('--block-wal-non-dmcrypt') %}
+ {%- elif disk.get('block_wal_dmcrypt', False) %}
+ {%- do cmd.append('--block-wal-dmcrypt') %}
+ {%- endif %}
+ {%- if disk.block_wal_partition is defined %}
+ {%- do cmd.append('--block-wal-partition-number ' + disk.block_wal_partition|string) %}
+ {%- endif %}
+ {%- do cmd.append('--block.wal ' + disk.block_wal) %}
+ {%- endif %}
+ {%- do cmd.append(dev) %}
{%- elif backend_name == 'filestore' and ceph_version not in ['kraken', 'jewel'] %}
-{%- do cmd.append('--filestore') %}
-{%- do cmd.append(dev) %}
-{%- if disk.journal is defined %}
-{%- do cmd.append(disk.journal) %}
-{%- endif %}
+ {%- if disk.journal_dmcrypt is defined and not disk.journal_dmcrypt %}
+ {%- do cmd.append('--journal-non-dmcrypt') %}
+ {%- elif disk.get('journal_dmcrypt', False) %}
+ {%- do cmd.append('--journal-dmcrypt') %}
+ {%- endif %}
+ {%- if disk.journal_partition is defined %}
+ {%- do cmd.append('--journal-partition-number ' + disk.journal_partition|string) %}
+ {%- endif %}
+ {%- do cmd.append('--filestore') %}
+ {%- do cmd.append(dev) %}
+ {%- if disk.journal is defined %}
+ {%- do cmd.append(disk.journal) %}
+ {%- endif %}
{%- elif backend_name == 'filestore' %}
-{%- do cmd.append(dev) %}
-{%- if disk.journal is defined %}
-{%- do cmd.append(disk.journal) %}
-{%- endif %}
+ {%- if disk.journal_dmcrypt is defined and not disk.journal_dmcrypt %}
+ {%- do cmd.append('--journal-non-dmcrypt') %}
+ {%- elif disk.get('journal_dmcrypt', False) %}
+ {%- do cmd.append('--journal-dmcrypt') %}
+ {%- endif %}
+ {%- if disk.journal_partition is defined %}
+ {%- do cmd.append('--journal-partition-number ' + disk.journal_partition|string) %}
+ {%- endif %}
+ {%- do cmd.append(dev) %}
+ {%- if disk.journal is defined %}
+ {%- do cmd.append(disk.journal) %}
+ {%- endif %}
{%- endif %}
-prepare_disk_{{ dev }}:
+prepare_disk_{{ dev_device }}:
cmd.run:
- name: "yes | ceph-disk prepare {{ cmd|join(' ') }}"
- - unless: "ceph-disk list | grep {{ dev }} | grep ceph"
+ - unless: "ceph-disk list | grep {{ dev_device }} | grep ceph"
- require:
- - cmd: zap_disk_{{ dev }}
+ - cmd: zap_disk_{{ dev_device }}
- pkg: ceph_osd_packages
- file: /etc/ceph/ceph.conf
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}
-reload_partition_table_{{ dev }}:
+reload_partition_table_{{ dev_device }}:
cmd.run:
- name: "partprobe"
- - unless: "lsblk -p | grep {{ dev }} -A1 | grep -v lockbox | grep ceph | grep osd"
+ - unless: "lsblk -p | grep {{ dev_device }} -A1 | grep -v lockbox | grep ceph | grep osd"
- require:
- - cmd: prepare_disk_{{ dev }}
- - cmd: zap_disk_{{ dev }}
+ - cmd: prepare_disk_{{ dev_device }}
+ - cmd: zap_disk_{{ dev_device }}
- pkg: ceph_osd_packages
- file: /etc/ceph/ceph.conf
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}
-activate_disk_{{ dev }}:
+activate_disk_{{ dev_device }}:
cmd.run:
{%- if disk.get('dmcrypt', False) %}
- - name: "ceph-disk activate --dmcrypt --activate-key /etc/ceph/ceph.client.bootstrap-osd.keyring {{ dev }}1"
+ - name: "ceph-disk activate --dmcrypt --activate-key /etc/ceph/ceph.client.bootstrap-osd.keyring {{ dev_device }}"
{%- else %}
- - name: "ceph-disk activate --activate-key /etc/ceph/ceph.client.bootstrap-osd.keyring {{ dev }}1"
+ - name: "ceph-disk activate --activate-key /etc/ceph/ceph.client.bootstrap-osd.keyring {{ dev_device }}"
{%- endif %}
- - unless: "lsblk -p | grep {{ dev }} -A1 | grep -v lockbox | grep ceph | grep osd"
+ - unless: "lsblk -p | grep {{ dev_device }} -A1 | grep -v lockbox | grep ceph | grep osd"
- require:
- - cmd: prepare_disk_{{ dev }}
- - cmd: zap_disk_{{ dev }}
+ - cmd: prepare_disk_{{ dev_device }}
+ - cmd: zap_disk_{{ dev_device }}
- pkg: ceph_osd_packages
- file: /etc/ceph/ceph.conf
{%- if grains.get('noservices') %}
@@ -156,6 +207,8 @@
{%- endfor %}
+{%- endif %}
+
osd_services_global:
service.running:
- enable: true
diff --git a/tests/pillar/ceph_osd_single.sls b/tests/pillar/ceph_osd_single.sls
index ce1c36a..5e1d1d2 100644
--- a/tests/pillar/ceph_osd_single.sls
+++ b/tests/pillar/ceph_osd_single.sls
@@ -35,21 +35,32 @@
- dev: /dev/sdm
enabled: false
journal: /dev/sdn
- fs_type: xfs
+ journal_dmcrypt: true
class: bestssd
weight: 1.5
- dev: /dev/sdl
- fs_type: xfs
class: bestssd
weight: 1.5
dmcrypt: true
- dev: /dev/sdo
journal: /dev/sdo
- fs_type: xfs
+ journal_partition: 5
+ data_partition: 9
+ data_partition_size: 12000
class: bestssd
weight: 1.5
bluestore:
disks:
- dev: /dev/sdb
- enabled: false
+ block_db: /dev/sdf
+ block_wal: /dev/sdf
+ enabled: true
+ block_partition: 3
+ block_db_partition: 3
+ block_wal_partition: 4
+ data_partition: 2
- dev: /dev/sdc
+ block_db: /dev/sdf
+ block_wal: /dev/sdf
+ dmcrypt: true
+ block_db_dmcrypt: false