Merge "include ceph.osd.custom in ceph.osd.init"
diff --git a/README.rst b/README.rst
index a0c3d3c..74ac462 100644
--- a/README.rst
+++ b/README.rst
@@ -525,10 +525,11 @@
...
-Ceph manage keyring keys
+Ceph manage client keyring keys
------------------------
+-------------------------------
-Keyrings are dynamically generated unless specified by the following pillar.
+Keyrings are generated dynamically unless a key is defined in the pillar and the manage_keyring option is enabled.
+This setting has no effect on the admin keyring (see the next section).
.. code-block:: yaml
@@ -543,6 +544,31 @@
mon: "allow r"
osd: "allow class-read object_prefix rdb_children, allow rwx pool=images"
+Ceph manage admin keyring
+--------------------------
+To use a pre-defined admin key, add manage_admin_keyring and the admin keyring definition to the Ceph mon nodes in cluster_model/ceph/mon.yml:
+
+.. code-block:: yaml
+
+    ceph:
+      common:
+        manage_admin_keyring: true
+        keyring:
+          admin:
+            caps:
+              mds: "allow *"
+              mgr: "allow *"
+              mon: "allow *"
+              osd: "allow *"
+            key: AACf3ulZFFPNDxAAd2DWds3aEkHh4IklZVgIaQ==
+
+Specify an alternative keyring path and user name for radosgw:
+
+.. code-block:: yaml
+
+ ceph:
+ radosgw:
+ keyring_user: radosgw.gateway
+ keyring_path: /etc/ceph/keyring.radosgw.gateway
+
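+With the pillar above, the Luminous Debian template renders a ceph.conf section
+along these lines (the host name is illustrative), and the radosgw systemd unit
+becomes ceph-radosgw@radosgw.gateway:
+
+.. code-block:: ini
+
+    [client.radosgw.gateway]
+    host = gw01
+    keyring = /etc/ceph/keyring.radosgw.gateway
+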
Generate CRUSH map - Recommended way
-----------------------------------
diff --git a/_grains/ceph.py b/_grains/ceph.py
index 45652e8..54bc97e 100644
--- a/_grains/ceph.py
+++ b/_grains/ceph.py
@@ -6,6 +6,7 @@
from subprocess import check_output
from subprocess import CalledProcessError
import shlex
+ import json
import os
import re
@@ -28,44 +29,60 @@
# osd
if os.path.exists('/var/lib/ceph/osd'):
- mount_path = check_output("df -h | awk '{print $6}' | grep ceph | grep -v lockbox | sed 's/-[0-9]*//g' | awk 'NR==1{print $1}'", shell=True).rstrip()
- sed = 'sed \'s#{0}-##g\''.format(mount_path)
- cmd = "lsblk -rp | awk '{print $1,$6,$7}' | grep -v lockbox | grep ceph | " + sed
+ cmd = "ceph-volume lvm list --format json"
osd_output = check_output(cmd, shell=True)
+ osd_output = json.loads(osd_output)
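+        # Illustrative shape of the parsed report (fields abridged), e.g.:
+        #   {"0": [{"devices": ["/dev/vdb"],
+        #           "path": "/dev/ceph-<vg>/osd-block-<id>",
+        #           "tags": {"ceph.osd_fsid": "<uuid>"}}]}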
+ dev_id = ''
+ devices = {}
if osd_output:
+ for osd, params in osd_output.iteritems():
+ dev_id = osd
+ devices[dev_id] = {}
+ devices[dev_id]['dev'] = params[0]['devices'][0]
+ devices[dev_id]['path'] = params[0]['path']
+ devices[dev_id]['uuid'] = params[0]['tags']['ceph.osd_fsid']
+
+ cmd = "ceph osd tree --format json"
+ osd_tree_output = check_output(cmd, shell=True)
+ osd_tree_output = json.loads(osd_tree_output)
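+            # 'nodes' entries with type_id 0 are OSDs; copy their CRUSH weight
+            # and device class onto the matching local device by OSD id.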
+ for osd in osd_tree_output['nodes']:
+ if 'type_id' in osd.keys():
+ if str(osd['type_id']) == '0':
+ for dev_id in devices.keys():
+ if str(osd['id']) == str(dev_id):
+ devices[dev_id]['weight'] = osd['crush_weight']
+ devices[dev_id]['class'] = osd['device_class']
+ grain["ceph"]["ceph_volume"] = devices
+ else:
+ cmd = "ceph-disk list --format json"
+ osd_output = check_output(cmd, shell=True).decode("utf-8")
+ osd_output = json.loads(osd_output)
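+        # ceph-disk returns a list of devices; whole-disk entries (no
+        # 'is_partition' key) carry a 'partitions' list, and mounted ceph
+        # partitions expose 'whoami' (the OSD id) and a 'dmcrypt' field.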
+ dev_id = ''
devices = {}
- for line in osd_output.splitlines():
- device = line.split()
- encrypted = False
- if "crypt" in device[1]:
- output = check_output("lsblk -rp | grep -B1 " + device[0], shell=True)
- for l in output.splitlines():
- d = l.split()
- dev = re.sub("\d+", "", device[0])
- encrypted = True
- break
- else:
- dev = device[0].replace('1','')
- dev = re.sub("\d+", "", device[0])
- device[0] = device[2]
- try:
- devices[device[0]] = {}
- devices[device[0]]['dev'] = dev
- if encrypted:
- devices[device[0]]['dmcrypt'] = 'true'
- tline = check_output("ceph -c " + conf_file + " osd tree | awk '{print $1,$2,$3,$4}' | grep -w 'osd." + device[0] + "'", shell=True)
- osd = tline.split()
- if "osd" not in osd[2]:
- crush_class = osd[1]
- crush_weight = osd[2]
- devices[device[0]]['class'] = crush_class
- devices[device[0]]['weight'] = crush_weight
- else:
- crush_weight = osd[1]
- devices[device[0]]['weight'] = crush_weight
- except CalledProcessError:
- continue
- grain["ceph"]["ceph_disk"] = devices
+ if osd_output:
+ for line in osd_output:
+ if "is_partition" not in line.keys():
+ dev = line["path"]
+ parts = line["partitions"]
+ for p in parts:
+ if "mount" in p.keys() and "ceph" in p["mount"]:
+ dev_id = p["whoami"]
+ devices[dev_id] = {}
+ devices[dev_id]['dev'] = dev
+ if len(p["dmcrypt"]) > 0:
+ devices[dev_id]['dmcrypt'] = 'true'
+
+ cmd = "ceph osd tree --format json"
+ osd_tree_output = check_output(cmd, shell=True).decode("utf-8")
+ osd_tree_output = json.loads(osd_tree_output)
+ for osd in osd_tree_output['nodes']:
+ if 'type_id' in osd.keys():
+ if str(osd['type_id']) == '0':
+ for dev_id in devices.keys():
+ if str(osd['id']) == str(dev_id):
+ devices[dev_id]['weight'] = osd['crush_weight']
+ devices[dev_id]['class'] = osd['device_class']
+ grain["ceph"]["ceph_disk"] = devices
# keyrings
directory = '/etc/ceph/'
diff --git a/ceph/backup.sls b/ceph/backup.sls
index 7963e5c..5067012 100644
--- a/ceph/backup.sls
+++ b/ceph/backup.sls
@@ -54,6 +54,8 @@
{%- endif %}
{%- if backup.client.backup_times.minute is defined %}
- minute: {{ backup.client.backup_times.minute }}
+{%- else %}
+ - minute: 0
{%- endif %}
{%- elif backup.client.hours_before_incr is defined %}
- minute: 0
@@ -155,6 +157,8 @@
{%- endif %}
{%- if backup.server.backup_times.minute is defined %}
- minute: {{ backup.server.backup_times.minute }}
+{%- else %}
+ - minute: 0
{%- endif %}
{%- elif backup.server.hours_before_incr is defined %}
- minute: 0
diff --git a/ceph/common.sls b/ceph/common.sls
index b445355..964dff7 100644
--- a/ceph/common.sls
+++ b/ceph/common.sls
@@ -37,6 +37,20 @@
{%- if common.keyring is defined and common.keyring.admin is defined %}
+{%- if common.get("manage_admin_keyring", False) %}
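+{#- Import the operator-provided admin key instead of generating one with --gen-key #}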
+
+ceph_create_keyring_admin:
+ cmd.run:
+ - name: "ceph-authtool --create-keyring {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.admin.keyring --add-key {{ common.keyring.admin.key }} -n client.admin {%- for cap_name, cap in common.keyring.admin.caps.iteritems() %} --cap {{ cap_name }} '{{ cap }}' {%- endfor %}"
+ - unless: "test -f {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.admin.keyring"
+ - require:
+ {% if not common.get('container_mode', False) %}
+ - pkg: common_packages
+ {%- endif %}
+ - file: common_config
+
+{%- else %}
+
ceph_create_keyring_admin:
cmd.run:
- name: "ceph-authtool --create-keyring {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.admin.keyring --gen-key -n client.admin {%- for cap_name, cap in common.keyring.admin.caps.iteritems() %} --cap {{ cap_name }} '{{ cap }}' {%- endfor %}"
@@ -49,6 +63,8 @@
{%- endif %}
+{%- endif %}
+
{%- for node_name, node_grains in salt['mine.get']('ceph:common:keyring:admin', 'grains.items', 'pillar').iteritems() %}
{%- if node_grains.ceph is defined and node_grains.ceph.ceph_keyring is defined and node_grains.ceph.ceph_keyring.admin is defined and node_grains.ceph.get('fsid', '') == common.fsid %}
diff --git a/ceph/files/backup/ceph-backup-client-runner.sh b/ceph/files/backup/ceph-backup-client-runner.sh
index 971f944..4b5b4f9 100644
--- a/ceph/files/backup/ceph-backup-client-runner.sh
+++ b/ceph/files/backup/ceph-backup-client-runner.sh
@@ -45,9 +45,13 @@
rsync -arv --exclude=osd/{{ common.get('cluster_name', 'ceph') }}-*/current /var/lib/ceph $TMPDIR/{{ common.get('cluster_name', 'ceph') }}-$HOSTNAME/
{%- elif mon.get('enabled', False) %}
cp -a /etc/ceph/ $TMPDIR/
- service ceph-mon@$HOSTNAME stop
- cp -a /var/lib/ceph/ $TMPDIR/{{ common.get('cluster_name', 'ceph') }}-$HOSTNAME/
- service ceph-mon@$HOSTNAME start
+ if service ceph-mon@$HOSTNAME status >/dev/null; then
+ service ceph-mon@$HOSTNAME stop
+ cp -a /var/lib/ceph/ $TMPDIR/{{ common.get('cluster_name', 'ceph') }}-$HOSTNAME/
+ service ceph-mon@$HOSTNAME start
+ else
+ cp -a /var/lib/ceph/ $TMPDIR/{{ common.get('cluster_name', 'ceph') }}-$HOSTNAME/
+ fi
{%- endif %}
tar -cvzf $BACKUPDIR/$HOSTNAME/{{ common.get('cluster_name', 'ceph') }}-$HOSTNAME-$TIMESTAMP.tgz $TMPDIR
diff --git a/ceph/files/grafana_dashboards/hosts-overview_prometheus.json b/ceph/files/grafana_dashboards/hosts-overview_prometheus.json
index 93314e2..921f886 100644
--- a/ceph/files/grafana_dashboards/hosts-overview_prometheus.json
+++ b/ceph/files/grafana_dashboards/hosts-overview_prometheus.json
@@ -577,7 +577,7 @@
"tableColumn": "Value",
"targets": [
{
- "expr": "avg ( (irate(diskio_io_time{host=~\"($osd_hosts).*\"}[5m]) / 10 ) )",
+ "expr": "avg ( (irate(diskio_io_time{host=~\"($osd_hosts).*\"}[5m])) )",
"format": "table",
"instant": true,
"intervalFactor": 1,
diff --git a/ceph/files/grafana_dashboards/osds-overview_prometheus.json b/ceph/files/grafana_dashboards/osds-overview_prometheus.json
index 49c1cb5..3337014 100644
--- a/ceph/files/grafana_dashboards/osds-overview_prometheus.json
+++ b/ceph/files/grafana_dashboards/osds-overview_prometheus.json
@@ -383,7 +383,7 @@
},
{
"aliasColors": {},
- "bars": false,
+ "bars": true,
"dashLength": 10,
"dashes": false,
"datasource": null,
@@ -405,11 +405,11 @@
"max": false,
"min": false,
"rightSide": false,
- "show": true,
+ "show": false,
"total": false,
"values": false
},
- "lines": true,
+ "lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
@@ -427,7 +427,7 @@
{
"expr": "ceph_osd_numpg",
"format": "time_series",
- "instant": false,
+ "instant": true,
"intervalFactor": 1,
"legendFormat": "PGs per OSD",
"refId": "A"
@@ -446,7 +446,7 @@
"type": "graph",
"xaxis": {
"buckets": 20,
- "mode": "time",
+ "mode": "histogram",
"name": null,
"show": true,
"values": []
diff --git a/ceph/files/grafana_dashboards/pool-overview_prometheus.json b/ceph/files/grafana_dashboards/pool-overview_prometheus.json
index 1d50c9a..cd52bad 100644
--- a/ceph/files/grafana_dashboards/pool-overview_prometheus.json
+++ b/ceph/files/grafana_dashboards/pool-overview_prometheus.json
@@ -40,7 +40,7 @@
"dashLength": 10,
"dashes": false,
"datasource": null,
- "fill": 5,
+ "fill": 2,
"gridPos": {
"h": 7,
"w": 24,
@@ -68,7 +68,7 @@
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
- "stack": true,
+ "stack": false,
"steppedLine": false,
"targets": [
{
@@ -126,7 +126,7 @@
"dashLength": 10,
"dashes": false,
"datasource": null,
- "fill": 5,
+ "fill": 2,
"gridPos": {
"h": 6,
"w": 24,
@@ -154,7 +154,7 @@
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
- "stack": true,
+ "stack": false,
"steppedLine": false,
"targets": [
{
diff --git a/ceph/files/luminous/ceph.conf.Debian b/ceph/files/luminous/ceph.conf.Debian
index 43cb2f7..725109f 100644
--- a/ceph/files/luminous/ceph.conf.Debian
+++ b/ceph/files/luminous/ceph.conf.Debian
@@ -89,9 +89,17 @@
{%- if pillar.ceph.radosgw is defined %}
+{%- if radosgw.keyring_user is defined %}
+[client.{{ radosgw.keyring_user }}]
+{%- else %}
[client.rgw.{{ grains.host }}]
+{%- endif %}
host = {{ grains.host }}
+{%- if radosgw.keyring_path is defined %}
+keyring = {{ radosgw.keyring_path }}
+{%- else %}
keyring = /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.rgw.{{ grains.host }}.keyring
+{%- endif %}
rgw socket path = /tmp/radosgw-{{ grains.host }}.sock
log file = /var/log/ceph/{{ common.get('cluster_name', 'ceph') }}-rgw-{{ grains.host }}.log
rgw data = /var/lib/ceph/radosgw/{{ common.get('cluster_name', 'ceph') }}-rgw.{{ grains.host }}
diff --git a/ceph/map.jinja b/ceph/map.jinja
index 9533c09..1833052 100644
--- a/ceph/map.jinja
+++ b/ceph/map.jinja
@@ -46,7 +46,11 @@
- librados2
services:
{%- if grains.get('init', None) == 'systemd' %}
+ {%- if pillar.get('ceph',{}).get('radosgw',{}).keyring_user is defined %}
+ - ceph-radosgw@{{ pillar.ceph.radosgw.keyring_user }}
+ {%- else %}
- ceph-radosgw@rgw.{{ grains.host }}
+ {%- endif %}
{%- else %}
- radosgw-all
{%- endif %}
diff --git a/ceph/meta/logrotate.yml b/ceph/meta/logrotate.yml
new file mode 100644
index 0000000..8f2730c
--- /dev/null
+++ b/ceph/meta/logrotate.yml
@@ -0,0 +1,19 @@
+## Default: Daily rotation with 28 rotations kept
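+## Interval and rotation count can be overridden from pillar, for example:
+##   ceph:
+##     common:
+##       logrotate:
+##         interval: weekly
+##         rotate: 14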
+{%- from "ceph/map.jinja" import common, mgr, mon, osd, radosgw with context %}
+
+{%- if mgr.get('enabled', False) or mon.get('enabled', False) or osd.get('enabled', False) or radosgw.get('enabled', False) %}
+job:
+ ceph-common:
+ - files:
+ - /var/log/ceph/*.log
+ options:
+ - {{ common.get('logrotate', {}).get('interval', 'daily') }}
+ - rotate: {{ common.get('logrotate', {}).get('rotate', 28) }}
+ - compress
+ - sharedscripts
+ - missingok
+ - notifempty
+ - su root ceph
+ - postrotate:
+ killall -q -1 ceph-mon ceph-mgr ceph-mds ceph-osd ceph-fuse radosgw || true
+{%- endif %}
diff --git a/ceph/osd/setup.sls b/ceph/osd/setup.sls
index 5b1a832..ab03da1 100644
--- a/ceph/osd/setup.sls
+++ b/ceph/osd/setup.sls
@@ -21,7 +21,7 @@
{%- if disk.get('enabled', True) %}
-{% set dev = disk.dev %}
+{% set dev = '`readlink -f ' + disk.dev + '`' %}
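+{#- The backticks let the shell resolve symlinked device names (e.g. /dev/disk/by-id/...) to the real device at run time #}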
# for uniqueness
{% set dev_device = dev + disk.get('data_partition_prefix', '') + disk.get('data_partition', 1)|string %}
diff --git a/ceph/setup/keyring.sls b/ceph/setup/keyring.sls
index f26c608..d51a109 100644
--- a/ceph/setup/keyring.sls
+++ b/ceph/setup/keyring.sls
@@ -2,18 +2,18 @@
{% if not common.get('container_mode', False) %}
-{# run only if ceph cluster is present #}
-{%- for node_name, node_grains in salt['mine.get']('ceph:common:keyring:admin', 'grains.items', 'pillar').iteritems() %}
+ {# run only if ceph cluster is present #}
+ {%- for node_name, node_grains in salt['mine.get']('ceph:common:keyring:admin', 'grains.items', 'pillar').iteritems() %}
-{%- if node_grains.ceph is defined and node_grains.ceph.ceph_keyring is defined and node_grains.ceph.ceph_keyring.admin is defined and node_grains.ceph.get('fsid', '') == common.fsid %}
+ {%- if node_grains.ceph is defined and node_grains.ceph.ceph_keyring is defined and node_grains.ceph.ceph_keyring.admin is defined and node_grains.ceph.get('fsid', '') == common.fsid %}
-{%- if loop.index0 == 0 %}
+ {%- if loop.index0 == 0 %}
-{% for keyring_name, keyring in common.get('keyring', {}).iteritems() %}
+ {% for keyring_name, keyring in common.get('keyring', {}).iteritems() %}
-{%- if keyring.name is defined %}
+ {%- if keyring.name is defined %}
-{%- if keyring.name != 'admin' and keyring.key is defined and common.get("manage_keyring", False) %}
+ {%- if keyring.name != 'admin' and keyring.key is defined and common.get("manage_keyring", False) %}
{{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring:
file.managed:
@@ -25,22 +25,43 @@
ceph_import_keyring_{{ keyring.name }}:
cmd.run:
- - name: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth import -i {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring"
+ - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth import -i {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring
- onchanges:
- file: {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring
-{%- elif keyring.name != 'admin' %}
+ceph_update_caps_for_{{ keyring.name }}:
+ cmd.run:
+ - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth caps client.{{ keyring.name }} {%- for cap_name, cap in keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %}
+ - onchanges:
+ - file: {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring
+
+ {%- elif keyring.name != 'admin' %}
ceph_create_keyring_{{ keyring.name }}:
cmd.run:
- - name: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth get-or-create client.{{ keyring.name }} {%- for cap_name, cap in keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %} > {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring"
+ - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth get-or-create client.{{ keyring.name }} {%- for cap_name, cap in keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %} > {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring
- unless: "test -f {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring"
-{%- endif %}
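+  {#- If the ceph CLI is available, compare live caps with the pillar definition and re-apply them on mismatch #}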
+ {%- if salt['file.file_exists']('/usr/bin/ceph') %}
+ {%- set caps = salt['cmd.shell']('ceph auth list --format json') | load_json %}
+ {%- for client in caps['auth_dump'] %}
+ {%- if client['entity'] == "client." + keyring.name %}
+ {%- for cap_name, cap in client.caps.iteritems() %}
+          {%- if cap != keyring.caps.get(cap_name) %}
+ceph_update_caps_for_{{ keyring.name }}_{{ cap_name }}:
+ cmd.run:
+ - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth caps client.{{ keyring.name }} {%- for cap_name, cap in keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
-{%- else %}
+ {%- endif %}
-{%- if keyring_name != 'admin' and keyring.key is defined and common.get("manage_keyring", False) %}
+ {%- else %}
+
+ {%- if keyring_name != 'admin' and keyring.key is defined and common.get("manage_keyring", False) %}
{{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring_name }}.keyring:
file.managed:
@@ -56,23 +77,44 @@
- onchanges:
- file: {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring_name }}.keyring
-{%- elif keyring_name != 'admin' %}
+ceph_update_caps_for_{{ keyring_name }}:
+ cmd.run:
+ - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth caps client.{{ keyring_name }} {%- for cap_name, cap in keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %}
+ - onchanges:
+ - file: {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring_name }}.keyring
+
+ {%- elif keyring_name != 'admin' %}
ceph_create_keyring_{{ keyring_name }}:
cmd.run:
- - name: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth get-or-create client.{{ keyring_name }} {%- for cap_name, cap in keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %} > {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring_name }}.keyring"
+ - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth get-or-create client.{{ keyring_name }} {%- for cap_name, cap in keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %} > {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring_name }}.keyring
- unless: "test -f {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring_name }}.keyring"
-{%- endif %}
+ {%- if salt['file.file_exists']('/usr/bin/ceph') %}
+ {%- set caps = salt['cmd.shell']('ceph auth list --format json') | load_json %}
+ {%- for client in caps['auth_dump'] %}
+ {%- if client['entity'] == "client." + keyring_name %}
+ {%- for cap_name, cap in client.caps.iteritems() %}
+          {%- if cap != keyring.caps.get(cap_name) %}
+ceph_update_caps_for_{{ keyring_name }}_{{ cap_name }}:
+ cmd.run:
+ - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth caps client.{{ keyring_name }} {%- for cap_name, cap in keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
-{%- endif %}
+ {%- endif %}
-{% endfor %}
+ {%- endif %}
-{%- endif %}
+ {% endfor %}
-{%- endif %}
+ {%- endif %}
-{%- endfor %}
+ {%- endif %}
+
+ {%- endfor %}
{%- endif %}
diff --git a/tests/pillar/ceph_mon_single.sls b/tests/pillar/ceph_mon_single.sls
index 05ed121..5ad74c9 100644
--- a/tests/pillar/ceph_mon_single.sls
+++ b/tests/pillar/ceph_mon_single.sls
@@ -1,6 +1,6 @@
ceph:
common:
- version: kraken
+ version: luminous
cluster_name: ceph
config:
global:
@@ -27,7 +27,7 @@
osd: "allow *"
mon:
enabled: true
- version: kraken
+ version: luminous
keyring:
mon:
key: AQAnQIhZ6in5KxAAdf467upoRMWFcVg5pbh1yg==
@@ -40,3 +40,24 @@
mgr: "allow *"
mon: "allow *"
osd: "allow *"
+ radosgw:
+ enabled: true
+ hostname: gw.ceph.lab
+ keyring_user: radosgw.gateway
+ keyring_path: /etc/ceph/keyring.radosgw.gateway
+ bind:
+ address: 10.10.10.1
+ port: 8080
+ identity:
+ engine: keystone
+ api_version: 3
+ host: 10.10.10.100
+ port: 5000
+ user: admin
+ password: password
+ project: admin
+ domain: default
+ swift:
+ versioning:
+ enabled: true
+ enforce_content_length: true