ceph osd fixes: use ceph-disk with filestore/bluestore backends
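
Rework OSD provisioning to drive ceph-disk instead of hand-building
OSD filesystems, journals and keyrings from salt states:

* declare OSD disks per backend (filestore or bluestore) under
  ceph:osd:backend instead of the flat osd:disk mapping
* add bluestore support, including block.db/block.wal devices and the
  bluestore_block_*_size tunables in ceph.conf
* activate OSDs with the bootstrap-osd keyring and document that
  keyring in the README
* drop the per-OSD [osd.N] config sections, the ceph-osd-perms
  systemd unit and the OSD grains (crushmap generation from grains is
  disabled until it is reworked)
* bump the documented version examples from kraken to luminous

A minimal sketch of the new pillar layout (see README.rst for the
full reference):

    ceph:
      osd:
        enabled: true
        backend:
          bluestore:
            disks:
              - dev: /dev/sdb
                block_db: /dev/ssd
          filestore:
            disks:
              - dev: /dev/sdl
                journal: /dev/ssd
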
Change-Id: Ibcc58c08a2333fd2e929599a37f3d1c26bf4691e
diff --git a/README.rst b/README.rst
index d5e1c98..6a62baf 100644
--- a/README.rst
+++ b/README.rst
@@ -182,7 +182,7 @@
ceph:
common:
- version: kraken
+ version: luminous
config:
global:
param1: value1
@@ -208,6 +208,11 @@
mgr: "allow *"
mon: "allow *"
osd: "allow *"
+ bootstrap-osd:
+ key: BQBHPYhZv5mYDBAAvisaSzCTQkC5gywGUp/voA==
+ caps:
+ mon: "allow profile bootstrap-osd"
+
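+The ``bootstrap-osd`` keyring is used by ``ceph-disk activate`` to register
+newly prepared OSDs with the cluster.
+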
Optional definition for cluster and public networks. Cluster network is used
for replication. Public network for front-end communication.
@@ -216,7 +221,7 @@
ceph:
common:
- version: kraken
+ version: luminous
fsid: a619c5fc-c4ed-4f22-9ed2-66cf2feca23d
....
public_network: 10.0.0.0/24, 10.1.0.0/24
@@ -284,31 +289,35 @@
key: value
osd:
enabled: true
- host_id: 10
- copy_admin_key: true
- journal_type: raw
- dmcrypt: disable
- osd_scenario: raw_journal_devices
- fs_type: xfs
- disk:
- '00':
- rule: hdd
- dev: /dev/vdb2
- journal: /dev/vdb1
- class: besthdd
- weight: 1.5
- '01':
- rule: hdd
- dev: /dev/vdc2
- journal: /dev/vdc1
- class: besthdd
- weight: 1.5
- '02':
- rule: hdd
- dev: /dev/vdd2
- journal: /dev/vdd1
- class: besthdd
- weight: 1.5
+      host_id: '39'
+      journal_size: 20480                   # MB
+      bluestore_block_db_size: 1073741824   # 1 GiB
+      bluestore_block_wal_size: 1073741824  # 1 GiB
+      bluestore_block_size: 807374182400    # ~800 GB
+ backend:
+ filestore:
+ disks:
+ - dev: /dev/sdm
+ enabled: false
+ rule: hdd
+ journal: /dev/ssd
+ fs_type: xfs
+ class: bestssd
+ weight: 1.5
+ - dev: /dev/sdl
+ rule: hdd
+ journal: /dev/ssd
+ fs_type: xfs
+ class: bestssd
+ weight: 1.5
+ bluestore:
+ disks:
+ - dev: /dev/sdb
+ - dev: /dev/sdc
+ block_db: /dev/ssd
+ block_wal: /dev/ssd
+ - dev: /dev/sdd
+ enabled: false
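+
+Disks are grouped per backend (``filestore`` or ``bluestore``). A disk entry
+with ``enabled: false`` is skipped during provisioning, and the
+``bluestore_block_*_size`` values are given in bytes.
+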
Ceph client roles
diff --git a/ceph/files/crushmap b/ceph/files/crushmap
index 7b244d3..a1065c4 100644
--- a/ceph/files/crushmap
+++ b/ceph/files/crushmap
@@ -6,21 +6,23 @@
{%- set osds = {} -%}
{%- set weights = {} -%}
-{%- for node_name, node_grains in salt['mine.get']('*', 'grains.items').iteritems() -%}
- {%- if node_grains.ceph_osd_host_id is defined -%}
- {# load OSDs and compute weight#}
- {%- set node_weight = [] -%}
- {%- for osd_relative_id, osd in node_grains.ceph_osd_disk.iteritems() -%}
- {%- set osd_id = node_grains.ceph_osd_host_id ~ osd_relative_id -%}
- {%- do osd.update({'host': node_grains.nodename }) -%}
- {%- do osds.update({osd_id: osd}) -%}
- {%- do node_weight.append(osd.weight) -%}
- {%- endfor -%}
+# TODO: the following loop is disabled and must be reworked for the new
+# osd.backend layout
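+# (it consumed the ceph_osd_disk/ceph_osd_host_id grains, which are no longer
+# published now that ceph/meta/salt.yml has been emptied).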
- {%- do hosts.update({node_grains.nodename: {'weight': node_weight|sum, 'parent': node_grains.ceph_crush_parent }}) -%}
-
- {%- endif -%}
-{%- endfor -%}
+#{%- for node_name, node_grains in salt['mine.get']('*', 'grains.items').iteritems() -%}
+# {%- if node_grains.ceph_osd_host_id is defined -%}
+# {# load OSDs and compute weight#}
+# {%- set node_weight = [] -%}
+# {%- for osd_relative_id, osd in node_grains.ceph_osd_disk.iteritems() -%}
+# {%- set osd_id = node_grains.ceph_osd_host_id ~ osd_relative_id -%}
+# {%- do osd.update({'host': node_grains.nodename }) -%}
+# {%- do osds.update({osd_id: osd}) -%}
+# {%- do node_weight.append(osd.weight) -%}
+# {%- endfor -%}
+#
+# {%- do hosts.update({node_grains.nodename: {'weight': node_weight|sum, 'parent': node_grains.ceph_crush_parent }}) -%}
+#
+# {%- endif -%}
+#{%- endfor -%}
{%- set _crush = setup.crush -%}
{%- set _buckets = [] %}
diff --git a/ceph/files/jewel/ceph.conf.Debian b/ceph/files/jewel/ceph.conf.Debian
index 721786c..05e625e 100644
--- a/ceph/files/jewel/ceph.conf.Debian
+++ b/ceph/files/jewel/ceph.conf.Debian
@@ -36,6 +36,18 @@
{%- endfor %}
+{%- if osd.bluestore_block_size is defined %}
+bluestore_block_size = {{ osd.bluestore_block_size }}
+{%- endif %}
+
+{%- if osd.bluestore_block_db_size is defined %}
+bluestore_block_db_size = {{ osd.bluestore_block_db_size }}
+{%- endif %}
+
+{%- if osd.bluestore_block_wal_size is defined %}
+bluestore_block_wal_size = {{ osd.bluestore_block_wal_size }}
+{%- endif %}
+
{%- if pillar.ceph.mon is defined %}
[mon]
@@ -59,18 +71,14 @@
{%- if pillar.ceph.osd is defined %}
[osd]
+{%- if pillar.ceph.osd.journal_size is defined %}
+osd journal size = {{ pillar.ceph.osd.journal_size }}
+{%- endif %}
{%- for key, value in common.get('config', {}).get('osd', {}).iteritems() %}
{{ key }} = {{ value }}
{%- endfor %}
-{%- for disk_id, disk in osd.disk.iteritems() %}
-{% set id = osd.host_id~disk_id %}
-[osd.{{ id }}]
-host = {{ grains.host }}
-osd journal = {{ disk.journal }}
-{%- endfor %}
-
{%- endif %}
{%- if pillar.ceph.radosgw is defined %}
diff --git a/ceph/files/kraken/ceph.conf.Debian b/ceph/files/kraken/ceph.conf.Debian
index 721786c..05e625e 100644
--- a/ceph/files/kraken/ceph.conf.Debian
+++ b/ceph/files/kraken/ceph.conf.Debian
@@ -36,6 +36,18 @@
{%- endfor %}
+{%- if osd.bluestore_block_size is defined %}
+bluestore_block_size = {{ osd.bluestore_block_size }}
+{%- endif %}
+
+{%- if osd.bluestore_block_db_size is defined %}
+bluestore_block_db_size = {{ osd.bluestore_block_db_size }}
+{%- endif %}
+
+{%- if osd.bluestore_block_wal_size is defined %}
+bluestore_block_wal_size = {{ osd.bluestore_block_wal_size }}
+{%- endif %}
+
{%- if pillar.ceph.mon is defined %}
[mon]
@@ -59,18 +71,14 @@
{%- if pillar.ceph.osd is defined %}
[osd]
+{%- if pillar.ceph.osd.journal_size is defined %}
+osd journal size = {{ pillar.ceph.osd.journal_size }}
+{%- endif %}
{%- for key, value in common.get('config', {}).get('osd', {}).iteritems() %}
{{ key }} = {{ value }}
{%- endfor %}
-{%- for disk_id, disk in osd.disk.iteritems() %}
-{% set id = osd.host_id~disk_id %}
-[osd.{{ id }}]
-host = {{ grains.host }}
-osd journal = {{ disk.journal }}
-{%- endfor %}
-
{%- endif %}
{%- if pillar.ceph.radosgw is defined %}
diff --git a/ceph/files/luminous/ceph.conf.Debian b/ceph/files/luminous/ceph.conf.Debian
index 1c4a8b6..3c5680c 100644
--- a/ceph/files/luminous/ceph.conf.Debian
+++ b/ceph/files/luminous/ceph.conf.Debian
@@ -36,6 +36,18 @@
{%- endfor %}
+{%- if osd.bluestore_block_size is defined %}
+bluestore_block_size = {{ osd.bluestore_block_size }}
+{%- endif %}
+
+{%- if osd.bluestore_block_db_size is defined %}
+bluestore_block_db_size = {{ osd.bluestore_block_db_size }}
+{%- endif %}
+
+{%- if osd.bluestore_block_wal_size is defined %}
+bluestore_block_wal_size = {{ osd.bluestore_block_wal_size }}
+{%- endif %}
+
{%- if pillar.ceph.mon is defined %}
[mon]
@@ -59,18 +71,14 @@
{%- if pillar.ceph.osd is defined %}
[osd]
+{%- if pillar.ceph.osd.journal_size is defined %}
+osd journal size = {{ pillar.ceph.osd.journal_size }}
+{%- endif %}
{%- for key, value in common.get('config', {}).get('osd', {}).iteritems() %}
{{ key }} = {{ value }}
{%- endfor %}
-{%- for disk_id, disk in osd.disk.iteritems() %}
-{% set id = osd.host_id~disk_id %}
-[osd.{{ id }}]
-host = {{ grains.host }}
-osd journal = {{ disk.journal }}
-{%- endfor %}
-
{%- endif %}
{%- if pillar.ceph.radosgw is defined %}
diff --git a/ceph/meta/salt.yml b/ceph/meta/salt.yml
index 420de44..e69de29 100644
--- a/ceph/meta/salt.yml
+++ b/ceph/meta/salt.yml
@@ -1,13 +0,0 @@
-grain:
- {%- if pillar.get('ceph', {}).get('osd', {}).get('enabled', False) %}
- {%- from "ceph/map.jinja" import osd with context %}
- ceph_osd_disk:
- {%- set ceph_osd_disk = {'ceph_osd_disk': osd.disk} %}
- {{ ceph_osd_disk|yaml(False)|indent(4) }}
- ceph_osd_host_id:
- {%- set ceph_osd_host_id = {'ceph_osd_host_id': osd.host_id} %}
- {{ ceph_osd_host_id|yaml(False)|indent(4) }}
- ceph_crush_parent:
- {%- set ceph_crush_parent = {'ceph_crush_parent': osd.crush_parent} %}
- {{ ceph_crush_parent|yaml(False)|indent(4) }}
- {%- endif %}
diff --git a/ceph/osd.sls b/ceph/osd.sls
index 5895830..9f6c19f 100644
--- a/ceph/osd.sls
+++ b/ceph/osd.sls
@@ -14,102 +14,134 @@
- require:
- pkg: ceph_osd_packages
-{% for disk_id, disk in osd.disk.iteritems() %}
+{% set ceph_version = pillar.ceph.common.version %}
-#Set ceph_host_id per node and interpolate
-{% set id = osd.host_id~disk_id %}
+{%- for backend_name, backend in osd.backend.iteritems() %}
-#Not needed - need to test
-#create_osd_{{ id }}:
-# cmd.run:
-# - name: "ceph osd create $(ls -l /dev/disk/by-uuid | grep {{ disk.dev | replace("/dev/", "") }} | awk '{ print $9}') {{ id }} "
+{%- for disk in backend.disks %}
-#Move this thing into linux
-makefs_{{ id }}:
- module.run:
- - name: xfs.mkfs
- - device: {{ disk.dev }}
- - unless: "ceph-disk list | grep {{ disk.dev }} | grep {{ osd.fs_type }}"
- {%- if grains.get('noservices') %}
- - onlyif: /bin/false
- {%- endif %}
+{%- if disk.get('enabled', True) %}
-/var/lib/ceph/osd/ceph-{{ id }}:
- mount.mounted:
- - device: {{ disk.dev }}
- - fstype: {{ osd.fs_type }}
- - opts: {{ disk.get('opts', 'rw,noatime,inode64,logbufs=8,logbsize=256k') }}
- - mkmnt: True
- {%- if grains.get('noservices') %}
- - onlyif: /bin/false
- {%- endif %}
+{% set dev = disk.dev %}
-permission_/var/lib/ceph/osd/ceph-{{ id }}:
- file.directory:
- - name: /var/lib/ceph/osd/ceph-{{ id }}
- - user: ceph
- - group: ceph
- - mode: 755
- - makedirs: False
- - require:
- - mount: /var/lib/ceph/osd/ceph-{{ id }}
- {%- if grains.get('noservices') %}
- - onlyif: /bin/false
- {%- endif %}
-
-
-{{ disk.journal }}:
- file.managed:
- - user: ceph
- - group: ceph
- - replace: false
-
-create_disk_{{ id }}:
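+# Wipe the device with ceph-disk zap so it can be repartitioned; skipped when
+# ceph-disk already reports a ceph partition on it.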
+zap_disk_{{ dev }}:
cmd.run:
- - name: "ceph-osd -i {{ id }} --conf /etc/ceph/ceph.conf --mkfs --mkkey --mkjournal --setuser ceph"
- - unless: "test -f /var/lib/ceph/osd/ceph-{{ id }}/fsid"
+ - name: "ceph-disk zap {{ dev }}"
+ - unless: "ceph-disk list | grep {{ dev }} | grep ceph"
- require:
- - file: /var/lib/ceph/osd/ceph-{{ id }}
- - mount: /var/lib/ceph/osd/ceph-{{ id }}
- {%- if grains.get('noservices') %}
- - onlyif: /bin/false
- {%- endif %}
-
-add_keyring_{{ id }}:
- cmd.run:
- - name: "ceph auth add osd.{{ id }} osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-{{ id }}/keyring"
- - unless: "ceph auth list | grep '^osd.{{ id }}'"
- - require:
- - cmd: create_disk_{{ id }}
- {%- if grains.get('noservices') %}
- - onlyif: /bin/false
- {%- endif %}
-
-/var/lib/ceph/osd/ceph-{{ id }}/done:
- file.managed:
- - content: {}
- - require:
- - cmd: add_keyring_{{ id }}
- {%- if grains.get('noservices') %}
- - onlyif: /bin/false
- {%- endif %}
-
-
-osd_services_{{ id }}_osd:
- service.running:
- - enable: true
- - names: ['ceph-osd@{{ id }}']
- - watch:
+ - pkg: ceph_osd_packages
- file: /etc/ceph/ceph.conf
- - require:
- - file: /var/lib/ceph/osd/ceph-{{ id }}/done
- - service: osd_services_perms
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}
-{% endfor %}
+{%- if disk.journal is defined %}
+zap_disk_journal_{{ disk.journal }}_for_{{ dev }}:
+ cmd.run:
+ - name: "ceph-disk zap {{ disk.journal }}"
+ - unless: "ceph-disk list | grep {{ disk.journal }} | grep ceph"
+ - require:
+ - pkg: ceph_osd_packages
+ - file: /etc/ceph/ceph.conf
+ - cmd: zap_disk_{{ dev }}
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
+
+{%- endif %}
+
+{%- if disk.block_db is defined %}
+
+zap_disk_blockdb_{{ disk.block_db }}_for_{{ dev }}:
+ cmd.run:
+ - name: "ceph-disk zap {{ disk.block_db }}"
+ - unless: "ceph-disk list | grep {{ disk.block_db }} | grep ceph"
+ - require:
+ - pkg: ceph_osd_packages
+ - file: /etc/ceph/ceph.conf
+ - cmd: zap_disk_{{ dev }}
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
+
+{%- endif %}
+
+{%- if disk.block_wal is defined %}
+
+zap_disk_blockwal_{{ disk.block_wal }}_for_{{ dev }}:
+ cmd.run:
+ - name: "ceph-disk zap {{ disk.block_wal }}"
+ - unless: "ceph-disk list | grep {{ disk.block_wal }} | grep ceph"
+ - require:
+ - pkg: ceph_osd_packages
+ - file: /etc/ceph/ceph.conf
+ - cmd: zap_disk_{{ dev }}
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
+
+{%- endif %}
+
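+# Prepare the device via ceph-disk, choosing flags per backend; luminous
+# requires an explicit --filestore flag, while earlier releases default to
+# filestore.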
+prepare_disk_{{ dev }}:
+ cmd.run:
+ {%- if backend_name == 'bluestore' and disk.block_db is defined and disk.block_wal is defined %}
+ - name: "ceph-disk prepare --bluestore {{ dev }} --block.db {{ disk.block_db }} --block.wal {{ disk.block_wal }}"
+ {%- elif backend_name == 'bluestore' and disk.block_db is defined %}
+ - name: "ceph-disk prepare --bluestore {{ dev }} --block.db {{ disk.block_db }}"
+ {%- elif backend_name == 'bluestore' and disk.block_wal is defined %}
+ - name: "ceph-disk prepare --bluestore {{ dev }} --block.wal {{ disk.block_wal }}"
+ {%- elif backend_name == 'bluestore' %}
+ - name: "ceph-disk prepare --bluestore {{ dev }}"
+ {%- elif backend_name == 'filestore' and disk.journal is defined and ceph_version == 'luminous' %}
+ - name: "ceph-disk prepare --filestore {{ dev }} {{ disk.journal }}"
+ {%- elif backend_name == 'filestore' and ceph_version == 'luminous' %}
+ - name: "ceph-disk prepare --filestore {{ dev }}"
+ {%- elif backend_name == 'filestore' and disk.journal is defined and ceph_version != 'luminous' %}
+ - name: "ceph-disk prepare {{ dev }} {{ disk.journal }}"
+ {%- else %}
+ - name: "ceph-disk prepare {{ dev }}"
+ {%- endif %}
+ - unless: "ceph-disk list | grep {{ dev }} | grep ceph"
+ - require:
+ - cmd: zap_disk_{{ dev }}
+ - pkg: ceph_osd_packages
+ - file: /etc/ceph/ceph.conf
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
+
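+# Re-read the partition table so the kernel and udev pick up the partitions
+# ceph-disk just created.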
+reload_partition_table_{{ dev }}:
+ cmd.run:
+ - name: "partprobe"
+ - unless: "ceph-disk list | grep {{ dev }} | grep active"
+ - require:
+ - cmd: prepare_disk_{{ dev }}
+ - cmd: zap_disk_{{ dev }}
+ - pkg: ceph_osd_packages
+ - file: /etc/ceph/ceph.conf
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
+
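+# Activate the OSD using the bootstrap-osd keyring. Note: "{{ dev }}1" assumes
+# classic partition naming (e.g. /dev/sdb1); NVMe devices would need a "p1"
+# suffix.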
+activate_disk_{{ dev }}:
+ cmd.run:
+ - name: "ceph-disk activate --activate-key /etc/ceph/ceph.client.bootstrap-osd.keyring {{ dev }}1"
+ - unless: "ceph-disk list | grep {{ dev }} | grep active"
+ - require:
+ - cmd: prepare_disk_{{ dev }}
+ - cmd: zap_disk_{{ dev }}
+ - pkg: ceph_osd_packages
+ - file: /etc/ceph/ceph.conf
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
+
+{%- endif %}
+
+{%- endfor %}
+
+{%- endfor %}
osd_services_global:
service.running:
@@ -121,7 +153,6 @@
- onlyif: /bin/false
{%- endif %}
-
osd_services:
service.running:
- enable: true
@@ -132,29 +163,3 @@
- onlyif: /bin/false
{%- endif %}
-
-/etc/systemd/system/ceph-osd-perms.service:
- file.managed:
- - contents: |
- [Unit]
- Description=Set OSD journals owned by ceph user
- After=local-fs.target
- Before=ceph-osd.target
-
- [Service]
- Type=oneshot
- RemainAfterExit=yes
- ExecStart=/bin/bash -c "chown -v ceph $(cat /etc/ceph/ceph.conf | grep 'osd journal' | awk '{print $4}')"
-
- [Install]
- WantedBy=multi-user.target
-
-osd_services_perms:
- service.running:
- - enable: true
- - names: ['ceph-osd-perms']
- - require:
- - file: /etc/systemd/system/ceph-osd-perms.service
- {%- if grains.get('noservices') %}
- - onlyif: /bin/false
- {%- endif %}
diff --git a/metadata/service/osd/cluster.yml b/metadata/service/osd/cluster.yml
index 7b429f2..88e79d3 100644
--- a/metadata/service/osd/cluster.yml
+++ b/metadata/service/osd/cluster.yml
@@ -8,9 +8,3 @@
osd:
enabled: true
host_id: ${_param:ceph_host_id}
- crush_parent: ${_param:ceph_crush_parent}
- copy_admin_key: true
- journal_type: raw
- dmcrypt: disable
- osd_scenario: raw_journal_devices
- fs_type: xfs
\ No newline at end of file
diff --git a/metadata/service/osd/single.yml b/metadata/service/osd/single.yml
index 5aec498..4fce284 100644
--- a/metadata/service/osd/single.yml
+++ b/metadata/service/osd/single.yml
@@ -8,8 +8,3 @@
osd:
enabled: true
host_id: ${_param:ceph_host_id}
- copy_admin_key: true
- journal_type: raw
- dmcrypt: disable
- osd_scenario: raw_journal_devices
- fs_type: xfs
\ No newline at end of file
diff --git a/tests/pillar/ceph_osd_single.sls b/tests/pillar/ceph_osd_single.sls
index f039bbc..3138ed7 100644
--- a/tests/pillar/ceph_osd_single.sls
+++ b/tests/pillar/ceph_osd_single.sls
@@ -29,28 +29,29 @@
enabled: true
version: kraken
host_id: 10
- crush_parent: rack01
- copy_admin_key: true
- journal_type: raw
- dmcrypt: disable
- osd_scenario: raw_journal_devices
- fs_type: xfs
- disk:
- '00':
- rule: hdd
- dev: /dev/vdb2
- journal: /dev/vdb1
- class: besthdd
- weight: 1.5
- '01':
- rule: hdd
- dev: /dev/vdc2
- journal: /dev/vdc1
- class: besthdd
- weight: 1.5
- '02':
- rule: hdd
- dev: /dev/vdd2
- journal: /dev/vdd1
- class: besthdd
- weight: 1.5
+ backend:
+ filestore:
+ disks:
+ - dev: /dev/sdm
+ enabled: false
+ rule: hdd
+ journal: /dev/sdn
+ fs_type: xfs
+ class: bestssd
+ weight: 1.5
+ - dev: /dev/sdl
+ rule: hdd
+ fs_type: xfs
+ class: bestssd
+ weight: 1.5
+ - dev: /dev/sdo
+ rule: hdd
+ journal: /dev/sdo
+ fs_type: xfs
+ class: bestssd
+ weight: 1.5
+ bluestore:
+ disks:
+ - dev: /dev/sdb
+ enabled: false
+ - dev: /dev/sdc