Add mgr role - only for luminous
Change-Id: I550fadd2702b2539c4b07c26a2fe997810507c47
diff --git a/README.rst b/README.rst
index df8bf25..c7db5ea 100644
--- a/README.rst
+++ b/README.rst
@@ -92,6 +92,24 @@
mon: "allow *"
osd: "allow *"
+Ceph mgr roles
+------------------------
+
+The Ceph Manager daemon (ceph-mgr) runs alongside monitor daemons, to provide additional monitoring and interfaces to external monitoring and management systems. Since the 12.x (luminous) Ceph release, the ceph-mgr daemon is required for normal operations. The ceph-mgr daemon is an optional component in the 11.x (kraken) Ceph release.
+
+By default, the manager daemon requires no additional configuration, beyond ensuring it is running. If there is no mgr daemon running, you will see a health warning to that effect, and some of the other information in the output of ceph status will be missing or stale until a mgr is started.
+
+
+.. code-block:: yaml
+
+ ceph:
+ mgr:
+ enabled: true
+ dashboard:
+ enabled: true
+ host: 10.103.255.252
+ port: 7000
+
Ceph OSD (storage) roles
------------------------
diff --git a/ceph/files/luminous/ceph.conf.Debian b/ceph/files/luminous/ceph.conf.Debian
new file mode 100644
index 0000000..1c4a8b6
--- /dev/null
+++ b/ceph/files/luminous/ceph.conf.Debian
@@ -0,0 +1,108 @@
+{%- from "ceph/map.jinja" import common, mon, osd, radosgw with context %}
+[global]
+mon initial members = {%- for member in common.members %}{{ member.name }}{% if not loop.last %},{% endif %}{%- endfor %}
+mon host = {%- for member in common.members %}{{ member.host }}:6789{% if not loop.last %},{% endif %}{%- endfor %}
+
+{%- if common.cluster_network is defined %}
+cluster network = {{ common.cluster_network }}
+{%- endif %}
+{%- if common.public_network is defined %}
+public network = {{ common.public_network }}
+{%- endif %}
+
+fsid = {{ common.fsid }}
+
+##Global key: value
+{%- for key_name, key in common.get('config', {}).get('global', {}).items() %}
+
+{{ key_name }} = {{ key }}
+
+{%- endfor %}
+
+
+##Other sections key: value
+{%- for key_name, key in common.get('config', {}).items() %}
+
+{%- if key_name not in ['osd', 'mon', 'global'] %}
+[{{ key_name }}]
+
+{%- for value_name, value in key.items() %}
+
+{{ value_name }} = {{ value }}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- endfor %}
+
+{%- if pillar.ceph.mon is defined %}
+
+[mon]
+{%- for key, value in common.get('config', {}).get('mon', {}).items() %}
+{{ key }} = {{ value }}
+{%- endfor %}
+mon host = {%- for member in common.members %}{{ member.name }}{% if not loop.last %},{% endif %}{%- endfor %}
+mon addr = {%- for member in common.members %}{{ member.host }}:6789{% if not loop.last %},{% endif %}{%- endfor %}
+
+{%- for member in common.members %}
+[mon.{{ member.name }}]
+mon host = {{ member.name }}
+mon addr = {{ member.host }}:6789
+{%- if not loop.last %}
+
+{%- endif %}
+{%- endfor %}
+
+{%- endif %}
+
+{%- if pillar.ceph.osd is defined %}
+
+[osd]
+
+{%- for key, value in common.get('config', {}).get('osd', {}).items() %}
+{{ key }} = {{ value }}
+{%- endfor %}
+
+{%- for disk_id, disk in osd.disk.items() %}
+{% set id = osd.host_id~disk_id %}
+[osd.{{ id }}]
+host = {{ grains.host }}
+osd journal = {{ disk.journal }}
+{%- endfor %}
+
+{%- endif %}
+
+{%- if pillar.ceph.radosgw is defined %}
+
+[client.rgw.{{ grains.host }}]
+host = {{ grains.host }}
+keyring = /etc/ceph/ceph.client.rgw.{{ grains.host }}.keyring
+rgw socket path = /tmp/radosgw-{{ grains.host }}.sock
+log file = /var/log/ceph/ceph-rgw-{{ grains.host }}.log
+rgw data = /var/lib/ceph/radosgw/ceph-rgw.{{ grains.host }}
+rgw frontends = civetweb port={{ radosgw.bind.address }}:{{ radosgw.bind.port }} num_threads={{ radosgw.threads }}
+rgw dns name = {{ radosgw.get('hostname', grains.host) }}
+
+{%- if radosgw.identity.engine == 'keystone' %}
+{%- set ident = radosgw.identity %}
+
+rgw keystone api version = {{ ident.get('api_version', 3) }}
+rgw keystone url = {{ ident.host }}:{{ ident.get('port', '5000') }}
+rgw keystone accepted roles = _member_, Member, admin, swiftoperator
+rgw keystone revocation interval = 1000000
+rgw keystone implicit tenants = false
+rgw s3 auth use keystone = true
+rgw keystone admin user = {{ ident.get('user', 'admin') }}
+rgw keystone admin password = {{ ident.password }}
+rgw keystone verify ssl = False
+{%- if ident.get('api_version', 3) == 2 %}
+rgw keystone admin tenant = {{ ident.get('tenant', 'admin') }}
+{%- else %}
+rgw keystone admin project = {{ ident.get('project', 'admin') }}
+rgw keystone admin domain = {{ ident.get('domain', 'admin') }}
+{% endif %}
+
+{%- endif %}
+
+{%- endif %}
diff --git a/ceph/map.jinja b/ceph/map.jinja
index 3cd9166..0986380 100644
--- a/ceph/map.jinja
+++ b/ceph/map.jinja
@@ -16,6 +16,15 @@
{%- endload %}
{% set mon = salt['grains.filter_by'](mon_defaults, merge=salt['pillar.get']('ceph:mon')) %}
+{%- load_yaml as mgr_defaults %}
+Debian:
+ pkgs:
+ - ceph-mgr
+ services:
+ - ceph-mgr
+{%- endload %}
+{% set mgr = salt['grains.filter_by'](mgr_defaults, merge=salt['pillar.get']('ceph:mgr')) %}
+
{%- load_yaml as osd_defaults %}
Debian:
pkgs:
diff --git a/ceph/mgr.yml b/ceph/mgr.yml
new file mode 100644
index 0000000..4553e40
--- /dev/null
+++ b/ceph/mgr.yml
@@ -0,0 +1,90 @@
+{%- from "ceph/map.jinja" import common, mgr with context %}
+
+{%- if mgr.get('enabled', False) %}
+
+include:
+- ceph.common
+
+mon_packages:
+ pkg.installed:
+ - names: {{ mgr.pkgs }}
+
+/etc/ceph/ceph.conf:
+ file.managed:
+ - source: salt://ceph/files/{{ common.version }}/ceph.conf.{{ grains.os_family }}
+ - template: jinja
+ - require:
+ - pkg: mon_packages
+
+/var/lib/ceph/mgr/ceph-{{ grains.host }}/:
+ file.directory:
+ - makedirs: true
+ - user: ceph
+ - group: ceph
+ - require:
+ - pkg: mon_packages
+
+ceph_create_mgr_keyring_{{ grains.host }}:
+ cmd.run:
+ - name: "ceph auth get-or-create mgr.{{ grains.host }} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-{{ grains.host }}/keyring"
+ - unless: "test -f /var/lib/ceph/mgr/ceph-{{ grains.host }}/keyring"
+ - require:
+ - file: /var/lib/ceph/mgr/ceph-{{ grains.host }}/
+
+/var/lib/ceph/mgr/ceph-{{ grains.host }}/keyring:
+ file.managed:
+ - user: ceph
+ - group: ceph
+
+{%- if mgr.get('dashboard', {}).get('enabled', False) %}
+
+ceph_dashboard_address:
+ cmd.run:
+ - name: "ceph config-key put mgr/dashboard/server_addr {{ mgr.dashboard.get('host', '::') }}"
+ - unless: "ceph config-key get mgr/dashboard/server_addr | grep {{ mgr.dashboard.get('host', '::') }}"
+
+ceph_dashboard_port:
+ cmd.run:
+ - name: "ceph config-key put mgr/dashboard/server_port {{ mgr.dashboard.get('port', '7000') }}"
+ - unless: "ceph config-key get mgr/dashboard/server_port | grep {{ mgr.dashboard.get('port', '7000') }}"
+
+
+ceph_restart_dashboard_plugin:
+ cmd.wait:
+ - name: "ceph mgr module disable dashboard;ceph mgr module enable dashboard"
+ - watch:
+ - cmd: ceph_dashboard_address
+ - cmd: ceph_dashboard_port
+
+enable_ceph_dashboard:
+ cmd.run:
+ - name: "ceph mgr module enable dashboard"
+ - unless: "ceph mgr module ls | grep dashboard"
+
+{%- else %}
+
+disable_ceph_dashboard:
+ cmd.run:
+ - name: "ceph mgr module disable dashboard"
+ - onlyif: "ceph mgr module ls | grep dashboard"
+ - require:
+ - file: /var/lib/ceph/mgr/ceph-{{ grains.host }}/
+
+{%- endif %}
+
+
+mgr_services:
+ service.running:
+ - enable: true
+ - names: [ceph-mgr@{{ grains.host }}]
+ - watch:
+ - file: /etc/ceph/ceph.conf
+ - require:
+ - pkg: mon_packages
+ - file: /var/lib/ceph/mgr/ceph-{{ grains.host }}/keyring
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
+
+
+{%- endif %}
\ No newline at end of file