{%- from "ceph/map.jinja" import osd, common with context %}

# Install the OSD packages listed in the ceph map (osd.pkgs).
ceph_osd_packages:
  pkg.installed:
    - names: {{ osd.pkgs }}

# Render the cluster configuration file. The template is selected by the
# ceph release (common.version) and the minion's OS family.
/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf:
  file.managed:
    - source: salt://ceph/files/{{ common.version }}/ceph.conf.{{ grains.os_family }}
    - template: jinja
    - require:
      - pkg: ceph_osd_packages

{#- NOTE(review): the former `set ceph_version = pillar.ceph.common.version`
    was removed: the variable was never used in this file, and the direct
    attribute access would abort rendering whenever the pillar key is
    missing. Use common.version (already imported) instead if needed. #}
| |
{%- if osd.backend is defined %}

# ceph-volume expects the bootstrap keyring under the default name
# /var/lib/ceph/bootstrap-osd/ceph.keyring; link the cluster-named one there.
link_keyring:
  cmd.run:
    - name: "ln -s /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.bootstrap-osd.keyring /var/lib/ceph/bootstrap-osd/ceph.keyring"
    - unless: "ls /var/lib/ceph/bootstrap-osd/ceph.keyring"

{#- iteritems() is Python-2-only; items() works on both Py2 and Py3 minions. #}
{%- for backend_name, backend in osd.backend.items() %}

{#- Partition counters for shared block.db / block.wal devices. A plain
    `set` inside a for loop does not persist across iterations in Jinja2,
    so the counters live in a namespace(). A disk may still pin its
    partition explicitly via db_partition / wal_partition. #}
{%- set part = namespace(db=1, wal=1) %}

{%- for disk in backend.disks %}

{#- Skip disabled disks entirely: the prepare state must not be emitted
    for them (previously the guard closed too early and disabled disks
    were prepared with a stale device variable). #}
{%- if disk.get('enabled', True) %}

{%- set dev = disk.dev %}
{%- set db_partition = disk.get('db_partition', part.db) %}
{%- set wal_partition = disk.get('wal_partition', part.wal) %}

{#- Assemble the ceph-volume argument list for this disk. #}
{%- set cmd = [] %}
{%- do cmd.append('--cluster-fsid ' + common.fsid) %}
{%- if disk.get('dmcrypt', False) %}
{%- do cmd.append('--dmcrypt') %}
{%- endif %}
{%- do cmd.append('--bluestore') %}
{%- if disk.block_db is defined %}
{%- do cmd.append('--block.db ' + disk.block_db + db_partition | string) %}
{%- endif %}
{%- if disk.block_wal is defined %}
{%- do cmd.append('--block.wal ' + disk.block_wal + wal_partition | string) %}
{%- endif %}
{#- Single append avoids the double space '--data  <dev>' the old
    two-step append produced after join(' '). #}
{%- do cmd.append('--data ' + dev) %}

prepare_disk_{{ dev }}:
  cmd.run:
    - name: "ceph-volume lvm prepare {{ cmd | join(' ') }}"
    - unless: "ceph-volume lvm list | grep {{ dev }}"
    - require:
      - pkg: ceph_osd_packages
      - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
{%- if grains.get('noservices') %}
    - onlyif: /bin/false
{%- endif %}

{#- Advance the shared counters so the next disk on the same db/wal
    device gets the following partition number. #}
{%- set part.db = db_partition + 1 %}
{%- set part.wal = wal_partition + 1 %}

{%- endif %}

{%- endfor %}

{%- endfor %}

{%- endif %}
| |
# Activate every OSD volume that has been prepared on this node.
# With the 'noservices' grain set, onlyif /bin/false turns the state
# into a no-op (used for containerized / test runs).
activate_disks:
  cmd.run:
    - name: "ceph-volume lvm activate --all"
    - require:
      - pkg: ceph_osd_packages
      - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
{%- if grains.get('noservices') %}
    - onlyif: /bin/false
{%- endif %}

# Enable and start the OSD systemd target; restart on config changes.
osd_services_global:
  service.running:
    - enable: true
    - names: ['ceph-osd.target']
    - watch:
      - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
{%- if grains.get('noservices') %}
    - onlyif: /bin/false
{%- endif %}

# Enable and start the umbrella ceph target as well.
osd_services:
  service.running:
    - enable: true
    - names: ['ceph.target']
    - watch:
      - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
{%- if grains.get('noservices') %}
    - onlyif: /bin/false
{%- endif %}