Add support for Ceph Nautilus release
Related-Prod: PROD-34576
Change-Id: I4acc6c6bcf165254fdfb24508ff4142586c7e9c3
diff --git a/.gitignore b/.gitignore
index 26733ce..911172f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,4 @@
Gemfile*
.bundle/
kitchen-init.sh
+.vscode/settings.json
diff --git a/ceph/client.sls b/ceph/client.sls
index f2700ba..1bd36f7 100644
--- a/ceph/client.sls
+++ b/ceph/client.sls
@@ -11,8 +11,8 @@
{{ client.prefix_dir }}/etc/ceph:
file.directory:
- - user: root
- - group: root
+ - user: ceph
+ - group: ceph
- mode: 755
- makedirs: True
@@ -20,8 +20,8 @@
{{ client.prefix_dir }}/etc/ceph/ceph.client.{{ keyring_name }}.keyring:
file.managed:
- - user: root
- - group: root
+ - user: ceph
+ - group: ceph
- mode: 644
- replace: False
# bug, if file is empty no section is added by options_present
@@ -50,8 +50,8 @@
{{ client.prefix_dir }}/etc/ceph/ceph.conf:
file.managed:
- - user: root
- - group: root
+ - user: ceph
+ - group: ceph
- mode: 644
- replace: False
# bug, if file is empty no section is added by options_present
diff --git a/ceph/common.sls b/ceph/common.sls
index 964dff7..6b9608c 100644
--- a/ceph/common.sls
+++ b/ceph/common.sls
@@ -17,16 +17,16 @@
{{ common.prefix_dir }}/etc/ceph:
file.directory:
- - user: root
- - group: root
+ - user: ceph
+ - group: ceph
- mode: 755
- makedirs: True
common_config:
file.managed:
- name: {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
- - user: root
- - group: root
+ - user: ceph
+ - group: ceph
- source: salt://ceph/files/{{ common.version }}/ceph.conf.{{ grains.os_family }}
- template: jinja
{% if not common.get('container_mode', False) %}
diff --git a/ceph/files/nautilus/ceph.conf.Debian b/ceph/files/nautilus/ceph.conf.Debian
new file mode 100644
index 0000000..2fbc880
--- /dev/null
+++ b/ceph/files/nautilus/ceph.conf.Debian
@@ -0,0 +1,137 @@
+{%- from "ceph/map.jinja" import common, mon, osd, radosgw with context -%}
+[global]
+mon initial members = {% for member in common.members %}{{ member.name }}{% if not loop.last %},{% endif %}{%- endfor %}
+mon host = {% for member in common.members %}{{ member.host }}:6789{% if not loop.last %},{% endif %}{%- endfor %}
+
+{%- if common.cluster_network is defined %}
+cluster network = {{ common.cluster_network }}
+{%- endif %}
+{%- if common.public_network is defined %}
+public network = {{ common.public_network }}
+{%- endif %}
+
+fsid = {{ common.fsid }}
+{%- if common.pg_num is defined %}
+osd pool default pg num = {{ common.pg_num }}
+{%- endif %}
+{%- if common.pgp_num is defined %}
+osd pool default pgp num = {{ common.pgp_num }}
+{%- endif %}
+{# Global key: value — .items() is required: Salt on Python 3 has no dict.iteritems() #}
+{%- for key_name, key in common.get('config', {}).get('global', {}).items() %}
+{{ key_name }} = {{ key }}
+{%- endfor %}
+
+{# Other sections key: value #}
+{%- for key_name, key in common.get('config', {}).items() %}
+{%- if key_name not in ['rgw', 'osd', 'mon', 'global'] %}
+[{{ key_name }}]
+{%- for value_name, value in key.items() %}
+{{ value_name }} = {{ value }}
+{%- endfor %}
+{%- endif %}
+{%- endfor %}
+
+{%- if osd.bluestore_block_size is defined %}
+bluestore_block_size = {{ osd.bluestore_block_size }}
+{%- endif %}
+
+{%- if osd.bluestore_block_db_size is defined %}
+bluestore_block_db_size = {{ osd.bluestore_block_db_size }}
+{%- endif %}
+
+{%- if osd.bluestore_block_wal_size is defined %}
+bluestore_block_wal_size = {{ osd.bluestore_block_wal_size }}
+{%- endif %}
+
+{%- if pillar.ceph.mon is defined %}
+
+[mon]
+{%- for key, value in common.get('config', {}).get('mon', {}).items() %}
+{{ key }} = {{ value }}
+{%- endfor %}
+mon host = {%- for member in common.members %}[v2:{{ member.host }}:3300/0,v1:{{ member.host }}:6789/0]{% if not loop.last %},{% endif %}{%- endfor %}
+
+{%- for member in common.members %}
+[mon.{{ member.name }}]
+mon host = {{ member.name }}
+mon addr = [v2:{{ member.host }}:3300,v1:{{ member.host }}:6789]
+{%- if not loop.last %}
+
+{%- endif %}
+{%- endfor %}
+
+{%- endif %}
+
+{%- if pillar.ceph.osd is defined %}
+
+[osd]
+{%- if osd.crush is defined %}
+crush location = {% for crush in osd.crush %}{{ crush.type }}={{ crush.name }}{% if not loop.last %} {% endif %}{% endfor %}
+{%- endif %}
+
+{%- if osd.crush_initial_weight is defined %}
+osd crush initial weight = {{ osd.crush_initial_weight }}
+{%- endif %}
+
+osd crush update on start = {{ osd.get('crush_update', 'true') }}
+
+{%- if pillar.ceph.osd.journal_size is defined %}
+osd journal size = {{ osd.journal_size }}
+{%- endif %}
+
+{%- for key, value in common.get('config', {}).get('osd', {}).items() %}
+{{ key }} = {{ value }}
+{%- endfor %}
+
+{%- endif %}
+
+{%- if pillar.ceph.radosgw is defined %}
+
+{%- if radosgw.keyring_user is defined %}
+[client.{{ radosgw.keyring_user }}]
+{%- else %}
+[client.rgw.{{ grains.host }}]
+{%- endif %}
+host = {{ grains.host }}
+{%- if radosgw.keyring_path is defined %}
+keyring = {{ radosgw.keyring_path }}
+{%- else %}
+keyring = /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.rgw.{{ grains.host }}.keyring
+{%- endif %}
+rgw socket path = /tmp/radosgw-{{ grains.host }}.sock
+log file = /var/log/ceph/{{ common.get('cluster_name', 'ceph') }}-rgw-{{ grains.host }}.log
+rgw data = /var/lib/ceph/radosgw/{{ common.get('cluster_name', 'ceph') }}-rgw.{{ grains.host }}
+{%- if radosgw.ssl is defined and radosgw.ssl.get("enabled", False) %}
+rgw frontends = civetweb port={{ radosgw.bind.address }}:{{ radosgw.bind.port }}s num_threads={{ radosgw.threads }} ssl_certificate={{ radosgw.ssl.cert }}
+{%- else %}
+rgw frontends = civetweb port={{ radosgw.bind.address }}:{{ radosgw.bind.port }} num_threads={{ radosgw.threads }}
+{%- endif %}
+rgw dns name = {{ radosgw.get('hostname', grains.host) }}
+rgw swift versioning enabled = {{ radosgw.get('swift', {}).get('versioning', {}).get('enabled', 'false') }}
+rgw swift enforce content length = {{ radosgw.get('swift', {}).get('enforce_content_length', 'false') }}
+{%- if radosgw.identity.engine == 'keystone' %}
+{%- set ident = radosgw.identity %}
+rgw keystone api version = {{ ident.get('api_version', 3) }}
+rgw keystone url = {{ ident.get('protocol', 'https') }}://{{ ident.host }}:{{ ident.get('port', '5000') }}
+rgw keystone accepted roles = {{ ident.get('accepted_roles', '_member_, Member, admin, swiftoperator, ResellerAdmin') }}
+rgw keystone revocation interval = {{ ident.get('revocation_interval', '1000000') }}
+rgw keystone implicit tenants = {{ ident.get('implicit_tenants', 'true') }}
+rgw swift account in url = {{ ident.get('swift_account_in_url', 'true') }}
+rgw s3 auth use keystone = {{ ident.get('s3_auth_use_keystone', 'true') }}
+rgw keystone admin user = {{ ident.get('user', 'admin') }}
+rgw keystone admin password = {{ ident.password }}
+rgw keystone verify ssl = {{ ident.get('keystone_verify_ssl', 'False') }}
+rgw keystone token cache size = {{ ident.get('cache', '10000') }}
+{%- if ident.get('api_version', 3) == 2 %}
+rgw keystone admin tenant = {{ ident.get('tenant', 'admin') }}
+{%- else %}
+rgw keystone admin project = {{ ident.get('project', 'admin') }}
+rgw keystone admin domain = {{ ident.get('domain', 'admin') }}
+{%- endif %}
+{%- endif %}
+{# rgw key: value #}
+{%- for key, value in common.get('config', {}).get('rgw', {}).items() %}
+{{ key }} = {{ value }}
+{%- endfor %}
+{%- endif %}
diff --git a/ceph/mon.sls b/ceph/mon.sls
index dc51bb4..ceaa458 100644
--- a/ceph/mon.sls
+++ b/ceph/mon.sls
@@ -26,16 +26,17 @@
generate_monmap:
cmd.run:
- name: "monmaptool --create {%- for member in common.members %} --add {{ member.name }} {{ member.host }} {%- endfor %} --fsid {{ common.fsid }} /tmp/monmap"
+ - user: ceph
- unless: "test -f /tmp/monmap"
- require:
- pkg: mon_packages
-#/var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}:
-# file.directory:
-# - user: ceph
-# - group: ceph
-# - mode: 655
-# - makedirs: True
+/var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}:
+ file.directory:
+ - user: ceph
+ - group: ceph
+ - mode: 755
+ - makedirs: True
/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.mon.{{ grains.host }}.keyring:
file.managed:
@@ -47,44 +48,32 @@
populate_monmap:
cmd.run:
- - name: "sudo -u ceph ceph-mon -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf --mkfs -i {{ grains.host }} --monmap /tmp/monmap"
+ - name: "sudo -u ceph ceph-mon -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf --keyring /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.mon.{{ grains.host }}.keyring --mkfs -i {{ grains.host }} --monmap /tmp/monmap"
- unless: "test -f /var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/kv_backend"
- require:
- pkg: mon_packages
- file: common_config
-{% for keyring_name, keyring in mon.get('keyring', {}).iteritems() %}
-
-{%- if keyring_name == 'mon' and keyring.key is undefined %}
-
-cluster_secret_key:
- cmd.run:
- - name: "ceph-authtool --create-keyring /var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/keyring --gen-key -n mon. {%- for cap_name, cap in keyring.caps.iteritems() %} --cap {{ cap_name }} '{{ cap }}' {%- endfor %}"
- - unless: "test -f /var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/done"
- - require:
- - pkg: mon_packages
-
-cluster_secret_key_flag:
- file.managed:
- - name: /var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/done
- - user: ceph
- - group: ceph
- - content: { }
- - require:
- - pkg: mon_packages
-
-{%- endif %}
-
-{% endfor %}
-
/var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/keyring:
file.managed:
- source: salt://ceph/files/mon_keyring
+ - user: ceph
- template: jinja
- unless: "test -f /var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/done"
- require:
- pkg: mon_packages
+{# Nautilus only: switch monitors to the msgr2 wire protocol (port 3300). #}
+{# Trailing '-%}' removed from the if-tag: it stripped the following newline #}
+{# and glued 'enable_msgr2_protocol:' onto the previous rendered SLS line,  #}
+{# producing invalid state YAML. '{%- ... %}' matches the file's other ifs. #}
+{%- if common.version in ['nautilus'] %}
+enable_msgr2_protocol:
+  cmd.run:
+  - name: "ceph mon enable-msgr2"
+  - unless: "test -f /var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/done"
+  - require:
+    - pkg: mon_packages
+    - file: common_config
+
+{%- endif %}
+
/var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/done:
file.managed:
- user: ceph
diff --git a/ceph/osd/setup/lvm.sls b/ceph/osd/setup/lvm.sls
index 363c649..7634179 100644
--- a/ceph/osd/setup/lvm.sls
+++ b/ceph/osd/setup/lvm.sls
@@ -50,7 +50,7 @@
prepare_disk_{{ dev }}:
cmd.run:
- - name: "yes | ceph-volume lvm prepare {{ cmd|join(' ') }}"
+ - name: "ceph-volume lvm prepare {{ cmd|join(' ') }}"
- unless: "ceph-volume lvm list | grep {{ dev }}"
- require:
- pkg: ceph_osd_packages
@@ -67,19 +67,18 @@
{%- endfor %}
+{%- endif %}
+
activate_disks:
cmd.run:
- name: "ceph-volume lvm activate --all"
- require:
- - cmd: prepare_disk_{{ dev }}
- pkg: ceph_osd_packages
- file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}
-{%- endif %}
-
osd_services_global:
service.running:
- enable: true