Fixes for Kraken and earlier Ceph releases

* document that crush_rule must be given as a ruleset number and that
  the application parameter does not apply on Kraken and earlier
* parse 'ceph osd tree' output with or without the Luminous CLASS column
* omit device classes from the generated crushmap on Kraken and earlier
  and number CRUSH types from zero
* guard the mgr dashboard and OSD device-class states with a version check
* set crush_ruleset on pools for Kraken and earlier; enable the pool
  application only on Luminous and newer

Change-Id: I0543b8ab2169e561ac8189331f38c8c5baa5a696
diff --git a/README.rst b/README.rst
index 8b676c3..1d41a52 100644
--- a/README.rst
+++ b/README.rst
@@ -430,6 +430,9 @@
crush_rule: sata
application: rbd
+ .. note:: For Kraken and earlier releases, specify ``crush_rule`` as a numeric ruleset
+   rather than a rule name; the ``application`` parameter is not needed on those
+   releases. A pool definition in that form is sketched below.
+
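+ A minimal sketch of such a pool definition for Kraken or earlier (the pool name and
+ the ruleset number are illustrative, not taken from this formula):
+
+ .. code-block:: yaml
+
+     pool:
+       images:
+         pg_num: 128
+         type: replicated
+         crush_rule: 2
+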
Erasure ceph storage pool
.. code-block:: yaml
@@ -471,6 +474,7 @@
- region
- rack
- host
+ - osd
root:
- name: root-ssd
- name: root-sata
@@ -553,6 +557,9 @@
crush_rule: ssd
application: rbd
+ .. note:: For Kraken and earlier releases, specify ``crush_rule`` as a numeric ruleset
+   rather than a rule name; the ``application`` parameter is not needed on those
+   releases. A way to look up the ruleset number is sketched below.
+
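+ A minimal sketch of looking up the numeric ruleset behind a CRUSH rule (using the
+ ``ssd`` rule from the example above):
+
+ .. code-block:: bash
+
+     ceph osd crush rule dump ssd | grep ruleset
+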
Persist CRUSH map
--------------------
diff --git a/_grains/ceph.py b/_grains/ceph.py
index 1b4dfb2..a678404 100644
--- a/_grains/ceph.py
+++ b/_grains/ceph.py
@@ -25,10 +25,14 @@
devices[device[0]]['dev'] = dev
tline = check_output("ceph osd tree | awk '{print $1,$2,$3,$4}' | grep -w 'osd." + device[0] + "'", shell=True)
osd = tline.split()
- crush_class = osd[1]
- crush_weight = osd[2]
- devices[device[0]]['class'] = crush_class
- devices[device[0]]['weight'] = crush_weight
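+ # 'ceph osd tree' prints a CLASS column from Luminous on; on Kraken and earlier
+ # the third field is already the 'osd.N' name and there is no class to record.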
+ if "osd" not in osd[2]:
+ crush_class = osd[1]
+ crush_weight = osd[2]
+ devices[device[0]]['class'] = crush_class
+ devices[device[0]]['weight'] = crush_weight
+ else:
+ crush_weight = osd[1]
+ devices[device[0]]['weight'] = crush_weight
grain["ceph"]["ceph_disk"] = devices
# keyrings
diff --git a/ceph/files/crushmap b/ceph/files/crushmap
index 07a441a..82d1be0 100644
--- a/ceph/files/crushmap
+++ b/ceph/files/crushmap
@@ -31,14 +31,21 @@
# types
{%- for i in _crush.get('type', []) %}
-type {{ loop.index }} {{ i }}
+type {{ loop.index0 }} {{ i }}
{%- do types.update({loop.index: i}) %}
{%- endfor %}
# devices
+{%- set ceph_version = pillar.ceph.common.version -%}
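+{#- device classes only exist from Luminous on, so older crushmaps must not carry the 'class' keyword -#}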
+{%- if ceph_version not in ['kraken', 'jewel'] %}
{% for disk_id, disk in osds|dictsort %}
device {{ disk_id }} osd.{{ disk_id }}{%- if disk.get('class') %} class {{ disk.class }}{% endif %}
{%- endfor %}
+{%- else %}
+{% for disk_id, disk in osds|dictsort %}
+device {{ disk_id }} osd.{{ disk_id }}
+{%- endfor %}
+{%- endif %}
{% set count = [1] %}
diff --git a/ceph/mgr.sls b/ceph/mgr.sls
index 40f8bb8..e5dbf54 100644
--- a/ceph/mgr.sls
+++ b/ceph/mgr.sls
@@ -49,6 +49,8 @@
- onlyif: /bin/false
{%- endif %}
+{%- if common.version not in ['kraken', 'jewel'] %}
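+{# the mgr dashboard module configured below is only available from Luminous on #}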
+
{%- if mgr.get('dashboard', {}).get('enabled', False) %}
ceph_dashboard_address:
@@ -61,7 +63,6 @@
- name: "ceph config-key put mgr/dashboard/{{ grains.host }}/server_port {{ mgr.dashboard.get('port', '7000') }}"
- unless: "ceph config-key get mgr/dashboard/{{ grains.host }}/server_port | grep {{ mgr.dashboard.get('port', '7000') }}"
-
ceph_restart_dashboard_plugin:
cmd.wait:
- name: "ceph mgr module disable dashboard;ceph mgr module enable dashboard --force"
@@ -85,4 +86,6 @@
{%- endif %}
+{%- endif %}
+
{%- endif %}
\ No newline at end of file
diff --git a/ceph/osd/custom.sls b/ceph/osd/custom.sls
index 1410559..812dfc1 100644
--- a/ceph/osd/custom.sls
+++ b/ceph/osd/custom.sls
@@ -14,6 +14,8 @@
{%- if ceph_disk.get('dev') == dev %}
+{%- if ceph_version not in ['kraken', 'jewel'] %}
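+{# OSD device classes are only supported from Luminous on #}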
+
{%- if disk.class is defined %}
update_class_disk_{{ dev }}:
@@ -26,6 +28,8 @@
{%- endif %}
+{%- endif %}
+
{%- if disk.weight is defined %}
update_weight_disk_{{ dev }}:
diff --git a/ceph/setup/crush.sls b/ceph/setup/crush.sls
index 945a266..c188905 100644
--- a/ceph/setup/crush.sls
+++ b/ceph/setup/crush.sls
@@ -14,10 +14,19 @@
ceph_enforce_crush_map:
cmd.run:
- - name: ceph osd setcrushmap -i /etc/ceph/crushmap.compiled; touch /etc/ceph/crushmap.enforced
+ - name: ceph osd setcrushmap -i /etc/ceph/crushmap.compiled
- unless: "test -f /etc/ceph/crushmap.enforced"
-{# after crush map is setup enable appplication and crush rule for a pool #}
+/etc/ceph/crushmap.enforced:
+ file.managed:
+ - contents: ''
+ - unless: "test -f /etc/ceph/crushmap.enforced"
+ - require:
+ - cmd: ceph_enforce_crush_map
+
+{% set ceph_version = pillar.ceph.common.version %}
+
+{# after the crush map is set up, set the crush rule for each pool #}
{%- if setup.pool is defined %}
@@ -25,14 +34,14 @@
{%- for option_name, option_value in pool.iteritems() %}
-{%- if option_name in ['application', 'crush_rule'] %}
+{%- if option_name in ['crush_rule'] %}
-{%- if option_name == 'application' %}
+{%- if ceph_version in ['kraken', 'jewel'] %}
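+{# Kraken and earlier use the numeric 'crush_ruleset' pool option instead of 'crush_rule' #}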
-ceph_pool_{{ pool_name }}_enable_{{ option_name }}:
+ceph_pool_option_{{ pool_name }}_crush_ruleset:
cmd.run:
- - name: ceph osd pool {{ option_name }} enable {{ pool_name }} {{ option_value }}
- - unless: "ceph osd pool {{ option_name }} get {{ pool_name }} | grep '{{ option_value }}'"
+ - name: ceph osd pool set {{ pool_name }} crush_ruleset {{ option_value }}
+ - unless: "ceph osd pool get {{ pool_name }} crush_ruleset | grep 'crush_ruleset: {{ option_value }}'"
{%- else %}
diff --git a/ceph/setup/pool.sls b/ceph/setup/pool.sls
index 19fe080..35c7a08 100644
--- a/ceph/setup/pool.sls
+++ b/ceph/setup/pool.sls
@@ -1,5 +1,7 @@
{%- from "ceph/map.jinja" import setup with context %}
+{% set ceph_version = pillar.ceph.common.version %}
+
{%- for pool_name, pool in setup.pool.iteritems() %}
ceph_pool_create_{{ pool_name }}:
@@ -17,7 +19,14 @@
{%- for option_name, option_value in pool.iteritems() %}
-{%- if option_name not in ['type', 'pg_num', 'application', 'crush_rule'] %}
+{%- if option_name == 'application' and ceph_version not in ['kraken', 'jewel'] %}
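+{# 'ceph osd pool application enable' only exists from Luminous on #}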
+
+ceph_pool_{{ pool_name }}_enable_{{ option_name }}:
+ cmd.run:
+ - name: ceph osd pool {{ option_name }} enable {{ pool_name }} {{ option_value }}
+ - unless: "ceph osd pool {{ option_name }} get {{ pool_name }} | grep '{{ option_value }}'"
+
+{%- elif option_name not in ['type', 'pg_num', 'application', 'crush_rule'] %}
ceph_pool_option_{{ pool_name }}_{{ option_name }}:
cmd.run: