Merge "use ceph-volume for osd deployment"
diff --git a/.kitchen.yml b/.kitchen.yml
index c6c5d32..68b7f56 100644
--- a/.kitchen.yml
+++ b/.kitchen.yml
@@ -35,26 +35,14 @@
   sudo: true
 
 docker_images:
-  - &xenial-20163 <%=ENV['IMAGE_XENIAL_20163'] || 'docker-dev-local.docker.mirantis.net/epcim/salt/saltstack-ubuntu-xenial-salt-2016.3/salt:2018_11_19'%>
-  - &xenial-20177 <%=ENV['IMAGE_XENIAL_20177'] || 'docker-dev-local.docker.mirantis.net/epcim/salt/saltstack-ubuntu-xenial-salt-2017.7/salt:2018_11_19'%>
-  - &xenial-stable <%=ENV['IMAGE_XENIAL_STABLE'] || 'docker-dev-local.docker.mirantis.net/epcim/salt/saltstack-ubuntu-xenial-salt-stable/salt:2018_11_19'%>
+  - &xenial-20177 <%=ENV['IMAGE_XENIAL_20177'] || 'docker-dev-local.docker.mirantis.net/mirantis/drivetrain/salt-formulas-ci/salt-formulas-ci-xenial-2017.7:latest'%>
 
 platforms:
-  - name: xenial-2016.3
-    driver_config:
-      image: *xenial-20163
-      platform: ubuntu
-
   - name: xenial-2017.7
     driver_config:
       image: *xenial-20177
       platform: ubuntu
 
-  - name: xenial-stable
-    driver_config:
-      image: *xenial-stable
-      platform: ubuntu
-
 suites:
 
   - name: ceph_client_single
diff --git a/README.rst b/README.rst
index a0c3d3c..74ac462 100644
--- a/README.rst
+++ b/README.rst
@@ -525,10 +525,11 @@
             ...
 
 
-Ceph manage keyring keys
+Ceph manage clients keyring keys
 ------------------------
 
-Keyrings are dynamically generated unless specified by the following pillar.
+Keyrings are dynamically generated unless specified by the manage_keyring pillar.
+This setting has no effect on the admin keyring.
 
 .. code-block:: yaml
 
@@ -543,6 +544,31 @@
               mon: "allow r"
               osd: "allow class-read object_prefix rdb_children, allow rwx pool=images"
 
+Ceph manage admin keyring
+--------------------------
+To use a pre-defined admin key, add manage_admin_keyring and the admin keyring definition to the ceph mon nodes in cluster_model/ceph/mon.yml:
+
+ceph:
+  common:
+    manage_admin_keyring: true
+    keyring:
+      admin:
+        caps:
+          mds: "allow *"
+          mgr: "allow *"
+          mon: "allow *"
+          osd: "allow *"
+        key: AACf3ulZFFPNDxAAd2DWds3aEkHh4IklZVgIaQ==
+
+Specify alternative keyring path and username
+
+.. code-block:: yaml
+
+    ceph:
+      radosgw:
+        keyring_user: radosgw.gateway
+        keyring_path: /etc/ceph/keyring.radosgw.gateway
+
 
 Generate CRUSH map - Recommended way
 -----------------------------------
diff --git a/_grains/ceph.py b/_grains/ceph.py
index 45652e8..54bc97e 100644
--- a/_grains/ceph.py
+++ b/_grains/ceph.py
@@ -6,6 +6,7 @@
     from subprocess import check_output
     from subprocess import CalledProcessError
     import shlex
+    import json
     import os
     import re
 
@@ -28,44 +29,60 @@
 
         # osd
         if os.path.exists('/var/lib/ceph/osd'):
-            mount_path = check_output("df -h | awk '{print $6}' | grep ceph | grep -v lockbox | sed 's/-[0-9]*//g' | awk 'NR==1{print $1}'", shell=True).rstrip()
-            sed = 'sed \'s#{0}-##g\''.format(mount_path)
-            cmd = "lsblk -rp | awk '{print $1,$6,$7}' | grep -v lockbox | grep ceph | " + sed
+            cmd = "ceph-volume lvm list --format json"
             osd_output = check_output(cmd, shell=True)
+            osd_output = json.loads(osd_output)
+            dev_id = ''
+            devices = {}
             if osd_output:
+                for osd, params in osd_output.iteritems():
+                    dev_id = osd
+                    devices[dev_id] = {}
+                    devices[dev_id]['dev'] = params[0]['devices'][0]
+                    devices[dev_id]['path'] = params[0]['path']
+                    devices[dev_id]['uuid'] = params[0]['tags']['ceph.osd_fsid']
+
+                cmd = "ceph osd tree --format json"
+                osd_tree_output = check_output(cmd, shell=True)
+                osd_tree_output = json.loads(osd_tree_output)
+                for osd in osd_tree_output['nodes']:
+                    if 'type_id' in osd.keys():
+                        if str(osd['type_id']) == '0':
+                            for dev_id in devices.keys():
+                                if str(osd['id']) == str(dev_id):
+                                    devices[dev_id]['weight'] = osd['crush_weight']
+                                    devices[dev_id]['class'] = osd['device_class']
+                grain["ceph"]["ceph_volume"] = devices
+            else:
+                cmd = "ceph-disk list --format json"
+                osd_output = check_output(cmd, shell=True).decode("utf-8")
+                osd_output = json.loads(osd_output)
+                dev_id = ''
                 devices = {}
-                for line in osd_output.splitlines():
-                    device = line.split()
-                    encrypted = False
-                    if "crypt" in device[1]:
-                        output = check_output("lsblk -rp | grep -B1 " + device[0], shell=True)
-                        for l in output.splitlines():
-                            d = l.split()
-                            dev = re.sub("\d+", "", device[0])
-                            encrypted = True
-                            break
-                    else:
-                        dev = device[0].replace('1','')
-                        dev = re.sub("\d+", "", device[0])
-                    device[0] = device[2]
-                    try:
-                        devices[device[0]] = {}
-                        devices[device[0]]['dev'] = dev
-                        if encrypted:
-                            devices[device[0]]['dmcrypt'] = 'true'
-                        tline = check_output("ceph -c " + conf_file + " osd tree | awk '{print $1,$2,$3,$4}' | grep -w 'osd." + device[0] + "'", shell=True)
-                        osd = tline.split()
-                        if "osd" not in osd[2]:
-                            crush_class = osd[1]
-                            crush_weight = osd[2]
-                            devices[device[0]]['class'] = crush_class
-                            devices[device[0]]['weight'] = crush_weight
-                        else:
-                            crush_weight = osd[1]
-                            devices[device[0]]['weight'] = crush_weight
-                    except CalledProcessError:
-                        continue
-                grain["ceph"]["ceph_disk"] = devices
+                if osd_output:
+                    for line in osd_output:
+                        if "is_partition" not in line.keys():
+                            dev = line["path"]
+                            parts = line["partitions"]
+                            for p in parts:
+                                if "mount" in p.keys() and "ceph" in p["mount"]:
+                                    dev_id = p["whoami"]
+                                    devices[dev_id] = {}
+                                    devices[dev_id]['dev'] = dev
+                                    if len(p["dmcrypt"]) > 0:
+                                        devices[dev_id]['dmcrypt'] = 'true'
+
+                    cmd = "ceph osd tree --format json"
+                    osd_tree_output = check_output(cmd, shell=True).decode("utf-8")
+                    osd_tree_output = json.loads(osd_tree_output)
+                    for osd in osd_tree_output['nodes']:
+                        if 'type_id' in osd.keys():
+                            if str(osd['type_id']) == '0':
+                                for dev_id in devices.keys():
+                                    if str(osd['id']) == str(dev_id):
+                                        devices[dev_id]['weight'] = osd['crush_weight']
+                                        devices[dev_id]['class'] = osd['device_class']
+                    grain["ceph"]["ceph_disk"] = devices
 
         # keyrings
         directory = '/etc/ceph/'
diff --git a/ceph/backup.sls b/ceph/backup.sls
index 7963e5c..5067012 100644
--- a/ceph/backup.sls
+++ b/ceph/backup.sls
@@ -54,6 +54,8 @@
 {%- endif %}
 {%- if backup.client.backup_times.minute is defined %}
   - minute: {{ backup.client.backup_times.minute }}
+{%- else %}
+  - minute: 0
 {%- endif %}
 {%- elif backup.client.hours_before_incr is defined %}
   - minute: 0
@@ -155,6 +157,8 @@
 {%- endif %}
 {%- if backup.server.backup_times.minute is defined %}
   - minute: {{ backup.server.backup_times.minute }}
+{%- else %}
+  - minute: 0
 {%- endif %}
 {%- elif backup.server.hours_before_incr is defined %}
   - minute: 0
diff --git a/ceph/common.sls b/ceph/common.sls
index b445355..964dff7 100644
--- a/ceph/common.sls
+++ b/ceph/common.sls
@@ -37,6 +37,20 @@
 
 {%- if common.keyring is defined and common.keyring.admin is defined %}
 
+{%- if common.get("manage_admin_keyring", False) %}
+
+ceph_create_keyring_admin:
+  cmd.run:
+  - name: "ceph-authtool --create-keyring {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.admin.keyring --add-key {{ common.keyring.admin.key }} -n client.admin {%- for cap_name, cap in  common.keyring.admin.caps.iteritems() %} --cap {{ cap_name }} '{{ cap }}' {%- endfor %}"
+  - unless: "test -f {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.admin.keyring"
+  - require:
+  {% if not common.get('container_mode', False) %}
+    - pkg: common_packages
+  {%- endif %}
+    - file: common_config
+
+{%- else %}
+
 ceph_create_keyring_admin:
   cmd.run:
   - name: "ceph-authtool --create-keyring {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.admin.keyring --gen-key -n client.admin {%- for cap_name, cap in  common.keyring.admin.caps.iteritems() %} --cap {{ cap_name }} '{{ cap }}' {%- endfor %}"
@@ -49,6 +63,8 @@
 
 {%- endif %}
 
+{%- endif %}
+
 {%- for node_name, node_grains in salt['mine.get']('ceph:common:keyring:admin', 'grains.items', 'pillar').iteritems() %}
 
 {%- if node_grains.ceph is defined and node_grains.ceph.ceph_keyring is defined and node_grains.ceph.ceph_keyring.admin is defined and node_grains.ceph.get('fsid', '') == common.fsid %}
diff --git a/ceph/files/backup/ceph-backup-client-runner.sh b/ceph/files/backup/ceph-backup-client-runner.sh
index 971f944..bb04fc6 100644
--- a/ceph/files/backup/ceph-backup-client-runner.sh
+++ b/ceph/files/backup/ceph-backup-client-runner.sh
@@ -45,9 +45,13 @@
     rsync -arv --exclude=osd/{{ common.get('cluster_name', 'ceph') }}-*/current /var/lib/ceph $TMPDIR/{{ common.get('cluster_name', 'ceph') }}-$HOSTNAME/
 {%- elif mon.get('enabled', False) %}
     cp -a /etc/ceph/ $TMPDIR/
-    service ceph-mon@$HOSTNAME stop
-    cp -a /var/lib/ceph/ $TMPDIR/{{ common.get('cluster_name', 'ceph') }}-$HOSTNAME/
-    service ceph-mon@$HOSTNAME start
+    if systemctl status ceph-mon@$HOSTNAME >/dev/null; then
+        systemctl stop ceph-mon@$HOSTNAME
+        cp -a /var/lib/ceph/ $TMPDIR/{{ common.get('cluster_name', 'ceph') }}-$HOSTNAME/
+        systemctl start ceph-mon@$HOSTNAME
+    else
+        cp -a /var/lib/ceph/ $TMPDIR/{{ common.get('cluster_name', 'ceph') }}-$HOSTNAME/
+    fi
 {%- endif %}
 
     tar -cvzf $BACKUPDIR/$HOSTNAME/{{ common.get('cluster_name', 'ceph') }}-$HOSTNAME-$TIMESTAMP.tgz $TMPDIR
diff --git a/ceph/files/grafana_dashboards/ceph-cluster_prometheus.json b/ceph/files/grafana_dashboards/ceph-cluster_prometheus.json
index 2f54c74..5770143 100644
--- a/ceph/files/grafana_dashboards/ceph-cluster_prometheus.json
+++ b/ceph/files/grafana_dashboards/ceph-cluster_prometheus.json
@@ -124,6 +124,88 @@
       "colorBackground": false,
       "colorValue": false,
       "colors": [
+        "rgba(50, 128, 45, 0.9)",
+        "rgba(237, 129, 40, 0.9)",
+        "rgb(255, 0, 0)"
+      ],
+      "datasource": null,
+      "editable": true,
+      "error": false,
+      "format": "bytes",
+      "gauge": {
+        "maxValue": 100,
+        "minValue": 0,
+        "show": false,
+        "thresholdLabels": false,
+        "thresholdMarkers": true
+      },
+      "gridPos": {
+        "h": 3,
+        "w": 2,
+        "x": 2,
+        "y": 0
+      },
+      "hideTimeOverride": true,
+      "id": 67,
+      "interval": "1m",
+      "links": [],
+      "mappingType": 1,
+      "mappingTypes": [
+        {
+          "name": "value to text",
+          "value": 1
+        },
+        {
+          "name": "range to text",
+          "value": 2
+        }
+      ],
+      "maxDataPoints": 100,
+      "nullPointMode": "connected",
+      "nullText": null,
+      "postfix": "",
+      "postfixFontSize": "50%",
+      "prefix": "",
+      "prefixFontSize": "50%",
+      "rangeMaps": [
+        {
+          "from": "null",
+          "text": "N/A",
+          "to": "null"
+        }
+      ],
+      "span": 2,
+      "sparkline": {
+        "fillColor": "rgba(31, 118, 189, 0.18)",
+        "full": false,
+        "lineColor": "rgb(31, 120, 193)",
+        "show": false
+      },
+      "tableColumn": "",
+      "targets": [
+        {
+          "expr": "sum(ceph_osd_stat_bytes{instance=~\"$instance\"})",
+          "format": "time_series",
+          "interval": "$interval",
+          "intervalFactor": 1,
+          "refId": "A",
+          "step": 60
+        }
+      ],
+      "thresholds": "",
+      "timeFrom": "1m",
+      "title": "Total",
+      "transparent": false,
+      "type": "singlestat",
+      "valueFontSize": "50%",
+      "valueMaps": [],
+      "valueName": "current"
+    },
+    {
+      "cacheTimeout": null,
+      "colorBackground": false,
+      "colorValue": false,
+      "colors": [
         "#299c46",
         "rgba(237, 129, 40, 0.89)",
         "#d44a3a"
@@ -471,6 +553,170 @@
       }
     },
     {
+      "cacheTimeout": null,
+      "colorBackground": false,
+      "colorValue": false,
+      "colors": [
+        "rgba(50, 128, 45, 0.9)",
+        "rgba(237, 129, 40, 0.9)",
+        "rgb(255, 0, 0)"
+      ],
+      "datasource": null,
+      "editable": true,
+      "error": false,
+      "format": "bytes",
+      "gauge": {
+        "maxValue": 100,
+        "minValue": 0,
+        "show": false,
+        "thresholdLabels": false,
+        "thresholdMarkers": true
+      },
+      "gridPos": {
+        "h": 3,
+        "w": 2,
+        "x": 0,
+        "y": 3
+      },
+      "hideTimeOverride": true,
+      "id": 68,
+      "interval": "1m",
+      "links": [],
+      "mappingType": 1,
+      "mappingTypes": [
+        {
+          "name": "value to text",
+          "value": 1
+        },
+        {
+          "name": "range to text",
+          "value": 2
+        }
+      ],
+      "maxDataPoints": 100,
+      "nullPointMode": "connected",
+      "nullText": null,
+      "postfix": "",
+      "postfixFontSize": "50%",
+      "prefix": "",
+      "prefixFontSize": "50%",
+      "rangeMaps": [
+        {
+          "from": "null",
+          "text": "N/A",
+          "to": "null"
+        }
+      ],
+      "span": 2,
+      "sparkline": {
+        "fillColor": "rgba(31, 118, 189, 0.18)",
+        "full": false,
+        "lineColor": "rgb(31, 120, 193)",
+        "show": false
+      },
+      "tableColumn": "",
+      "targets": [
+        {
+          "expr": "sum(ceph_osd_stat_bytes{instance=~\"$instance\"})-sum(ceph_osd_stat_bytes_used{instance=~\"$instance\"})",
+          "format": "time_series",
+          "interval": "$interval",
+          "intervalFactor": 1,
+          "refId": "A",
+          "step": 60
+        }
+      ],
+      "thresholds": "",
+      "timeFrom": "1m",
+      "title": "Free",
+      "transparent": false,
+      "type": "singlestat",
+      "valueFontSize": "50%",
+      "valueMaps": [],
+      "valueName": "current"
+    },
+    {
+      "cacheTimeout": null,
+      "colorBackground": false,
+      "colorValue": false,
+      "colors": [
+        "rgba(50, 128, 45, 0.9)",
+        "rgba(237, 129, 40, 0.9)",
+        "rgb(255, 0, 0)"
+      ],
+      "datasource": null,
+      "editable": true,
+      "error": false,
+      "format": "bytes",
+      "gauge": {
+        "maxValue": 100,
+        "minValue": 0,
+        "show": false,
+        "thresholdLabels": false,
+        "thresholdMarkers": true
+      },
+      "gridPos": {
+        "h": 3,
+        "w": 2,
+        "x": 2,
+        "y": 3
+      },
+      "hideTimeOverride": true,
+      "id": 69,
+      "interval": "1m",
+      "links": [],
+      "mappingType": 1,
+      "mappingTypes": [
+        {
+          "name": "value to text",
+          "value": 1
+        },
+        {
+          "name": "range to text",
+          "value": 2
+        }
+      ],
+      "maxDataPoints": 100,
+      "nullPointMode": "connected",
+      "nullText": null,
+      "postfix": "",
+      "postfixFontSize": "50%",
+      "prefix": "",
+      "prefixFontSize": "50%",
+      "rangeMaps": [
+        {
+          "from": "null",
+          "text": "N/A",
+          "to": "null"
+        }
+      ],
+      "span": 2,
+      "sparkline": {
+        "fillColor": "rgba(31, 118, 189, 0.18)",
+        "full": false,
+        "lineColor": "rgb(31, 120, 193)",
+        "show": false
+      },
+      "tableColumn": "",
+      "targets": [
+        {
+          "expr": "sum(ceph_osd_stat_bytes_used{instance=~\"$instance\"})",
+          "format": "time_series",
+          "interval": "$interval",
+          "intervalFactor": 1,
+          "refId": "A",
+          "step": 60
+        }
+      ],
+      "thresholds": "",
+      "timeFrom": "1m",
+      "title": "Used",
+      "transparent": false,
+      "type": "singlestat",
+      "valueFontSize": "50%",
+      "valueMaps": [],
+      "valueName": "current"
+    },
+    {
       "aliasColors": {},
       "bars": false,
       "dashLength": 10,
@@ -512,14 +758,14 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "sum(irate(ceph_osd_op_w_in_bytes{instance=~\"$instance\"}[1m]))",
+          "expr": "sum(irate(ceph_osd_op_w_in_bytes{instance=~\"$instance\"}[$rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Writes",
           "refId": "A"
         },
         {
-          "expr": "sum(irate(ceph_osd_op_r_out_bytes{instance=~\"$instance\"}[1m]))",
+          "expr": "sum(irate(ceph_osd_op_r_out_bytes{instance=~\"$instance\"}[$rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Reads",
@@ -603,7 +849,7 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "sum(deriv(ceph_pool_bytes_used{instance=~\"$instance\"}[1m]))",
+          "expr": "sum(deriv(ceph_pool_bytes_used{instance=~\"$instance\"}[$rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Bytes",
@@ -813,7 +1059,7 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "sum(irate(ceph_osd_recovery_ops[1m]))",
+          "expr": "sum(irate(ceph_osd_recovery_ops[$rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Op/s",
@@ -972,6 +1218,49 @@
         "tagsQuery": "",
         "type": "query",
         "useTags": false
+      },
+      {
+        "allValue": null,
+        "current": {
+          "tags": [],
+          "text": "3m",
+          "value": "3m"
+        },
+        "hide": 0,
+        "includeAll": false,
+        "label": "rate_interval",
+        "multi": false,
+        "name": "rate_interval",
+        "options": [
+          {
+            "selected": false,
+            "text": "1m",
+            "value": "1m"
+          },
+          {
+            "selected": true,
+            "text": "3m",
+            "value": "3m"
+          },
+          {
+            "selected": false,
+            "text": "5m",
+            "value": "5m"
+          },
+          {
+            "selected": false,
+            "text": "10m",
+            "value": "10m"
+          },
+          {
+            "selected": false,
+            "text": "15m",
+            "value": "15m"
+          }
+        ],
+        "query": "1m,3m,5m,10m,15m",
+        "skipUrlSync": false,
+        "type": "custom"
       }
     ]
   },
diff --git a/ceph/files/grafana_dashboards/hosts-overview_prometheus.json b/ceph/files/grafana_dashboards/hosts-overview_prometheus.json
index bdc6a90..921f886 100644
--- a/ceph/files/grafana_dashboards/hosts-overview_prometheus.json
+++ b/ceph/files/grafana_dashboards/hosts-overview_prometheus.json
@@ -577,7 +577,7 @@
       "tableColumn": "Value",
       "targets": [
         {
-          "expr": "avg (\n  ((irate(diskio_io_time[5m]) / 10 )\n  ) *\n  on(host, name) diskio_io_time{host=~\"($osd_hosts).*\"}\n)",
+          "expr": "avg ( (irate(diskio_io_time{host=~\"($osd_hosts).*\"}[5m])) )",
           "format": "table",
           "instant": true,
           "intervalFactor": 1,
@@ -660,7 +660,7 @@
       "tableColumn": "Value",
       "targets": [
         {
-          "expr": "sum (\n  irate(net_bytes_recv{host=~\"($osd_hosts|mon_hosts|rgw_hosts).*\",device!=\"lo\"}[1m])\n  ) +\nsum (\n  irate(net_bytes_sent{host=~\"($osd_hosts|mon_hosts|rgw_hosts).*\",device!=\"lo\"}[1m]) \n  )",
+          "expr": "sum (\n  irate(net_bytes_recv{host=~\"($osd_hosts|mon_hosts|rgw_hosts).*\",device!=\"lo\"}[$rate_interval])\n  ) +\nsum (\n  irate(net_bytes_sent{host=~\"($osd_hosts|mon_hosts|rgw_hosts).*\",device!=\"lo\"}[$rate_interval]) \n  )",
           "format": "table",
           "instant": true,
           "intervalFactor": 1,
@@ -814,7 +814,7 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "topk(10, (sum by(host) (\n  (\n  irate(net_bytes_recv{host=~\"($osd_hosts|mon_hosts|rgw_hosts).*\",device!=\"lo\"}[1m])\n  ) +\n  (\n  irate(net_bytes_sent{host=~\"($osd_hosts|mon_hosts|rgw_hosts).*\",device!=\"lo\"}[1m])\n  ))\n  )\n)",
+          "expr": "topk(10, (sum by(host) (\n  (\n  irate(net_bytes_recv{host=~\"($osd_hosts|mon_hosts|rgw_hosts).*\",device!=\"lo\"}[$rate_interval])\n  ) +\n  (\n  irate(net_bytes_sent{host=~\"($osd_hosts|mon_hosts|rgw_hosts).*\",device!=\"lo\"}[$rate_interval])\n  ))\n  )\n)",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "{{host}}",
@@ -1029,6 +1029,48 @@
         "tagsQuery": "",
         "type": "query",
         "useTags": false
+      },
+      {
+        "allValue": null,
+        "current": {
+          "text": "3m",
+          "value": "3m"
+        },
+        "hide": 0,
+        "includeAll": false,
+        "label": "rate_interval",
+        "multi": false,
+        "name": "rate_interval",
+        "options": [
+          {
+            "selected": false,
+            "text": "1m",
+            "value": "1m"
+          },
+          {
+            "selected": true,
+            "text": "3m",
+            "value": "3m"
+          },
+          {
+            "selected": false,
+            "text": "5m",
+            "value": "5m"
+          },
+          {
+            "selected": false,
+            "text": "10m",
+            "value": "10m"
+          },
+          {
+            "selected": false,
+            "text": "15m",
+            "value": "15m"
+          }
+        ],
+        "query": "1m,3m,5m,10m,15m",
+        "skipUrlSync": false,
+        "type": "custom"
       }
     ]
   },
diff --git a/ceph/files/grafana_dashboards/osds-detail_prometheus.json b/ceph/files/grafana_dashboards/osds-detail_prometheus.json
index b9b950b..f02abbb 100644
--- a/ceph/files/grafana_dashboards/osds-detail_prometheus.json
+++ b/ceph/files/grafana_dashboards/osds-detail_prometheus.json
@@ -70,14 +70,14 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "irate(ceph_osd_op_r_latency_sum{ceph_daemon=~\"osd.[[osd_id]]\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000",
+          "expr": "irate(ceph_osd_op_r_latency_sum{ceph_daemon=~\"osd.[[osd_id]]\"}[$rate_interval]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[$rate_interval]) * 1000",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "READs",
           "refId": "A"
         },
         {
-          "expr": "irate(ceph_osd_op_w_latency_sum{ceph_daemon=~\"osd.[[osd_id]]\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000",
+          "expr": "irate(ceph_osd_op_w_latency_sum{ceph_daemon=~\"osd.[[osd_id]]\"}[$rate_interval]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[$rate_interval]) * 1000",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "WRITEs",
@@ -161,14 +161,14 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "irate(ceph_osd_op_r{ceph_daemon=~\"osd.[[osd_id]]\"}[1m])",
+          "expr": "irate(ceph_osd_op_r{ceph_daemon=~\"osd.[[osd_id]]\"}[$rate_interval])",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Reads",
           "refId": "A"
         },
         {
-          "expr": "irate(ceph_osd_op_w{ceph_daemon=~\"osd.[[osd_id]]\"}[1m])",
+          "expr": "irate(ceph_osd_op_w{ceph_daemon=~\"osd.[[osd_id]]\"}[$rate_interval])",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Writes",
@@ -252,14 +252,14 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "irate(ceph_osd_op_r_out_bytes{ceph_daemon=~\"osd.[[osd_id]]\"}[1m])",
+          "expr": "irate(ceph_osd_op_r_out_bytes{ceph_daemon=~\"osd.[[osd_id]]\"}[$rate_interval])",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Read Bytes",
           "refId": "A"
         },
         {
-          "expr": "irate(ceph_osd_op_w_in_bytes{ceph_daemon=~\"osd.[[osd_id]]\"}[1m])",
+          "expr": "irate(ceph_osd_op_w_in_bytes{ceph_daemon=~\"osd.[[osd_id]]\"}[$rate_interval])",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Write Bytes",
@@ -356,14 +356,14 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "label_replace(label_replace(irate(diskio_read_time[1m]), \"instance\", \"$1\", \"host\", \"(.+)\"), \"device\", \"$1\", \"name\", \"(.+)\") / label_replace(label_replace(irate(diskio_reads[1m]), \"instance\", \"$1\", \"host\", \"(.+)\"), \"device\", \"$1\", \"name\", \"(.+)\") and on (instance, device) ceph_disk_occupation{ceph_daemon=~\"osd.[[osd_id]]\"}",
+          "expr": "label_replace(label_replace(irate(diskio_read_time[$rate_interval]), \"instance\", \"$1\", \"host\", \"(.+)\"), \"device\", \"$1\", \"name\", \"(.+)\") / label_replace(label_replace(irate(diskio_reads[$rate_interval]), \"instance\", \"$1\", \"host\", \"(.+)\"), \"device\", \"$1\", \"name\", \"(.+)\") and on (instance, device) ceph_disk_occupation{ceph_daemon=~\"osd.[[osd_id]]\"}",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "{{instance}}/{{device}} Reads",
           "refId": "A"
         },
         {
-          "expr": "label_replace(label_replace(irate(diskio_write_time[1m]), \"instance\", \"$1\", \"host\", \"(.+)\"), \"device\", \"$1\", \"name\", \"(.+)\") / label_replace(label_replace(irate(diskio_writes[1m]), \"instance\", \"$1\", \"host\", \"(.+)\"), \"device\", \"$1\", \"name\", \"(.+)\") and on (instance, device) ceph_disk_occupation{ceph_daemon=~\"osd.[[osd_id]]\"}",
+          "expr": "label_replace(label_replace(irate(diskio_write_time[$rate_interval]), \"instance\", \"$1\", \"host\", \"(.+)\"), \"device\", \"$1\", \"name\", \"(.+)\") / label_replace(label_replace(irate(diskio_writes[$rate_interval]), \"instance\", \"$1\", \"host\", \"(.+)\"), \"device\", \"$1\", \"name\", \"(.+)\") and on (instance, device) ceph_disk_occupation{ceph_daemon=~\"osd.[[osd_id]]\"}",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "{{instance}}/{{device}} Writes",
@@ -447,14 +447,14 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "label_replace(label_replace(irate(diskio_reads[1m]), \"instance\", \"$1\", \"host\", \"(.+)\"), \"device\", \"$1\", \"name\", \"(.+)\") and on (instance, device) ceph_disk_occupation{ceph_daemon=~\"osd.[[osd_id]]\"}",
+          "expr": "label_replace(label_replace(irate(diskio_reads[$rate_interval]), \"instance\", \"$1\", \"host\", \"(.+)\"), \"device\", \"$1\", \"name\", \"(.+)\") and on (instance, device) ceph_disk_occupation{ceph_daemon=~\"osd.[[osd_id]]\"}",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "{{instance}} {{device}} READS",
           "refId": "A"
         },
         {
-          "expr": "label_replace(label_replace(irate(diskio_writes[1m]), \"instance\", \"$1\", \"host\", \"(.+)\"), \"device\", \"$1\", \"name\", \"(.+)\") and on (instance, device) ceph_disk_occupation{ceph_daemon=~\"osd.[[osd_id]]\"}",
+          "expr": "label_replace(label_replace(irate(diskio_writes[$rate_interval]), \"instance\", \"$1\", \"host\", \"(.+)\"), \"device\", \"$1\", \"name\", \"(.+)\") and on (instance, device) ceph_disk_occupation{ceph_daemon=~\"osd.[[osd_id]]\"}",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "{{instance}} {{device}} WRITES",
@@ -538,14 +538,14 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "label_replace(label_replace(irate(diskio_read_bytes[1m]), \"instance\", \"$1\", \"host\", \"(.+)\"), \"device\", \"$1\", \"name\", \"(.+)\") and on (instance, device) ceph_disk_occupation{ceph_daemon=~\"osd.[[osd_id]]\"}",
+          "expr": "label_replace(label_replace(irate(diskio_read_bytes[$rate_interval]), \"instance\", \"$1\", \"host\", \"(.+)\"), \"device\", \"$1\", \"name\", \"(.+)\") and on (instance, device) ceph_disk_occupation{ceph_daemon=~\"osd.[[osd_id]]\"}",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "{{instance}} {{device}} READS",
           "refId": "A"
         },
         {
-          "expr": "label_replace(label_replace(irate(diskio_write_bytes[1m]), \"instance\", \"$1\", \"host\", \"(.+)\"), \"device\", \"$1\", \"name\", \"(.+)\") and on (instance, device) ceph_disk_occupation{ceph_daemon=~\"osd.[[osd_id]]\"}",
+          "expr": "label_replace(label_replace(irate(diskio_write_bytes[$rate_interval]), \"instance\", \"$1\", \"host\", \"(.+)\"), \"device\", \"$1\", \"name\", \"(.+)\") and on (instance, device) ceph_disk_occupation{ceph_daemon=~\"osd.[[osd_id]]\"}",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "{{instance}} {{device}} WRITES",
@@ -629,7 +629,7 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "label_replace(label_replace(irate(diskio_io_time[1m]), \"instance\", \"$1\", \"host\", \"(.+)\"), \"device\", \"$1\", \"name\", \"(.+)\") and on (instance, device) ceph_disk_occupation{ceph_daemon=~\"osd.[[osd_id]]\"}",
+          "expr": "label_replace(label_replace(irate(diskio_io_time[$rate_interval]), \"instance\", \"$1\", \"host\", \"(.+)\"), \"device\", \"$1\", \"name\", \"(.+)\") and on (instance, device) ceph_disk_occupation{ceph_daemon=~\"osd.[[osd_id]]\"}",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "{{instance}} {{device}}",
@@ -708,6 +708,48 @@
         "tagsQuery": "",
         "type": "query",
         "useTags": false
+      },
+      {
+        "allValue": null,
+        "current": {
+          "text": "3m",
+          "value": "3m"
+        },
+        "hide": 0,
+        "includeAll": false,
+        "label": "rate_interval",
+        "multi": false,
+        "name": "rate_interval",
+        "options": [
+          {
+            "selected": false,
+            "text": "1m",
+            "value": "1m"
+          },
+          {
+            "selected": true,
+            "text": "3m",
+            "value": "3m"
+          },
+          {
+            "selected": false,
+            "text": "5m",
+            "value": "5m"
+          },
+          {
+            "selected": false,
+            "text": "10m",
+            "value": "10m"
+          },
+          {
+            "selected": false,
+            "text": "15m",
+            "value": "15m"
+          }
+        ],
+        "query": "1m,3m,5m,10m,15m",
+        "skipUrlSync": false,
+        "type": "custom"
       }
     ]
   },
diff --git a/ceph/files/grafana_dashboards/osds-overview_prometheus.json b/ceph/files/grafana_dashboards/osds-overview_prometheus.json
index c399fc0..3337014 100644
--- a/ceph/files/grafana_dashboards/osds-overview_prometheus.json
+++ b/ceph/files/grafana_dashboards/osds-overview_prometheus.json
@@ -58,21 +58,21 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "avg (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)",
+          "expr": "avg (irate(ceph_osd_op_r_latency_sum[$rate_interval]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[$rate_interval]) * 1000)",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "AVG read",
           "refId": "A"
         },
         {
-          "expr": "max (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)",
+          "expr": "max (irate(ceph_osd_op_r_latency_sum[$rate_interval]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[$rate_interval]) * 1000)",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "MAX read",
           "refId": "B"
         },
         {
-          "expr": "quantile(0.95,\n  (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)\n)",
+          "expr": "quantile(0.95,\n  (irate(ceph_osd_op_r_latency_sum[$rate_interval]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[$rate_interval]) * 1000)\n)",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "@95%ile",
@@ -188,7 +188,7 @@
       ],
       "targets": [
         {
-          "expr": "topk(10,\n  (sort(\n    (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)\n  ))\n)\n\n",
+          "expr": "topk(10,\n  (sort(\n    (irate(ceph_osd_op_r_latency_sum[$rate_interval]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[$rate_interval]) * 1000)\n  ))\n)\n\n",
           "format": "table",
           "instant": true,
           "intervalFactor": 1,
@@ -239,21 +239,21 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "avg (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)",
+          "expr": "avg (irate(ceph_osd_op_w_latency_sum[$rate_interval]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[$rate_interval]) * 1000)",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "AVG write",
           "refId": "A"
         },
         {
-          "expr": "max (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)",
+          "expr": "max (irate(ceph_osd_op_w_latency_sum[$rate_interval]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[$rate_interval]) * 1000)",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "MAX write",
           "refId": "B"
         },
         {
-          "expr": "quantile(0.95,\n (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)\n)",
+          "expr": "quantile(0.95,\n (irate(ceph_osd_op_w_latency_sum[$rate_interval]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[$rate_interval]) * 1000)\n)",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "@95%ile write",
@@ -369,7 +369,7 @@
       ],
       "targets": [
         {
-          "expr": "topk(10,\n  (sort(\n    (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)\n  ))\n)\n\n",
+          "expr": "topk(10,\n  (sort(\n    (irate(ceph_osd_op_w_latency_sum[$rate_interval]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[$rate_interval]) * 1000)\n  ))\n)\n\n",
           "format": "table",
           "instant": true,
           "intervalFactor": 1,
@@ -383,7 +383,7 @@
     },
     {
       "aliasColors": {},
-      "bars": false,
+      "bars": true,
       "dashLength": 10,
       "dashes": false,
       "datasource": null,
@@ -405,11 +405,11 @@
         "max": false,
         "min": false,
         "rightSide": false,
-        "show": true,
+        "show": false,
         "total": false,
         "values": false
       },
-      "lines": true,
+      "lines": false,
       "linewidth": 1,
       "links": [],
       "nullPointMode": "null",
@@ -427,7 +427,7 @@
         {
           "expr": "ceph_osd_numpg",
           "format": "time_series",
-          "instant": false,
+          "instant": true,
           "intervalFactor": 1,
           "legendFormat": "PGs per OSD",
           "refId": "A"
@@ -446,7 +446,7 @@
       "type": "graph",
       "xaxis": {
         "buckets": 20,
-        "mode": "time",
+        "mode": "histogram",
         "name": null,
         "show": true,
         "values": []
@@ -526,7 +526,7 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "round(sum(irate(ceph_pool_rd[1m])))",
+          "expr": "round(sum(irate(ceph_pool_rd[$rate_interval])))",
           "format": "time_series",
           "hide": false,
           "instant": false,
@@ -536,7 +536,7 @@
           "refId": "A"
         },
         {
-          "expr": "round(sum(irate(ceph_pool_wr[1m])))",
+          "expr": "round(sum(irate(ceph_pool_wr[$rate_interval])))",
           "format": "time_series",
           "instant": false,
           "interval": "",
@@ -594,7 +594,51 @@
     "osd"
   ],
   "templating": {
-    "list": []
+    "list": [
+      {
+        "allValue": null,
+        "current": {
+          "tags": [],
+          "text": "3m",
+          "value": "3m"
+        },
+        "hide": 0,
+        "includeAll": false,
+        "label": "rate_interval",
+        "multi": false,
+        "name": "rate_interval",
+        "options": [
+          {
+            "selected": false,
+            "text": "1m",
+            "value": "1m"
+          },
+          {
+            "selected": true,
+            "text": "3m",
+            "value": "3m"
+          },
+          {
+            "selected": false,
+            "text": "5m",
+            "value": "5m"
+          },
+          {
+            "selected": false,
+            "text": "10m",
+            "value": "10m"
+          },
+          {
+            "selected": false,
+            "text": "15m",
+            "value": "15m"
+          }
+        ],
+        "query": "1m,3m,5m,10m,15m",
+        "skipUrlSync": false,
+        "type": "custom"
+      }
+    ]
   },
   "time": {
     "from": "now-1h",
diff --git a/ceph/files/grafana_dashboards/pool-overview_prometheus.json b/ceph/files/grafana_dashboards/pool-overview_prometheus.json
index a58e6a2..600ef46 100644
--- a/ceph/files/grafana_dashboards/pool-overview_prometheus.json
+++ b/ceph/files/grafana_dashboards/pool-overview_prometheus.json
@@ -16,8 +16,7 @@
   "editable": true,
   "gnetId": null,
   "graphTooltip": 0,
-  "id": null,
-  "iteration": 1545072836850,
+  "iteration": 1574692480292,
   "links": [],
   "panels": [
     {
@@ -40,10 +39,10 @@
       "dashLength": 10,
       "dashes": false,
       "datasource": null,
-      "fill": 5,
+      "fill": 2,
       "gridPos": {
         "h": 7,
-        "w": 24,
+        "w": 12,
         "x": 0,
         "y": 1
       },
@@ -68,11 +67,11 @@
       "renderer": "flot",
       "seriesOverrides": [],
       "spaceLength": 10,
-      "stack": true,
+      "stack": false,
       "steppedLine": false,
       "targets": [
         {
-          "expr": "avg((rate(ceph_pool_rd{pool_id=~\"[[pool_id]]\"}[1m]) + rate(ceph_pool_wr{pool_id=~\"[[pool_id]]\"}[1m])) + on(pool_id,instance) group_left(name) ceph_pool_metadata{pool_id=~\"[[pool_id]]\"}) without (instance)",
+          "expr": "avg(rate(ceph_pool_rd{pool_id=~\"[[pool_id]]\"}[$rate_interval]) * on(pool_id,instance) group_left(name) ceph_pool_metadata{pool_id=~\"[[pool_id]]\"}) without (instance)",
           "format": "time_series",
           "hide": false,
           "intervalFactor": 1,
@@ -83,7 +82,7 @@
       "thresholds": [],
       "timeFrom": null,
       "timeShift": null,
-      "title": "Client IOPS by Pool",
+      "title": "Read IOPS by Pool",
       "tooltip": {
         "shared": true,
         "sort": 0,
@@ -99,7 +98,7 @@
       },
       "yaxes": [
         {
-          "format": "none",
+          "format": "rps",
           "label": null,
           "logBase": 1,
           "max": null,
@@ -126,10 +125,96 @@
       "dashLength": 10,
       "dashes": false,
       "datasource": null,
-      "fill": 5,
+      "fill": 2,
       "gridPos": {
-        "h": 6,
-        "w": 24,
+        "h": 7,
+        "w": 12,
+        "x": 12,
+        "y": 1
+      },
+      "id": 16,
+      "legend": {
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "minSpan": 12,
+      "nullPointMode": "null",
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "avg(rate(ceph_pool_wr{pool_id=~\"[[pool_id]]\"}[$rate_interval]) * on(pool_id,instance) group_left(name) ceph_pool_metadata{pool_id=~\"[[pool_id]]\"}) without (instance)",
+          "format": "time_series",
+          "hide": false,
+          "intervalFactor": 1,
+          "legendFormat": "{{name}}",
+          "refId": "F"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeShift": null,
+      "title": "Write IOPS by Pool",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "wps",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": "0",
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": false
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": null,
+      "fill": 2,
+      "gridPos": {
+        "h": 7,
+        "w": 12,
         "x": 0,
         "y": 8
       },
@@ -154,11 +239,11 @@
       "renderer": "flot",
       "seriesOverrides": [],
       "spaceLength": 10,
-      "stack": true,
+      "stack": false,
       "steppedLine": false,
       "targets": [
         {
-          "expr": "avg((rate(ceph_pool_rd_bytes{pool_id=~\"[[pool_id]]\"}[1m]) + rate(ceph_pool_wr_bytes{pool_id=~\"[[pool_id]]\"}[1m])) + on(pool_id,instance) group_left(name) ceph_pool_metadata{pool_id=~\"[[pool_id]]\"}) without (instance)",
+          "expr": "avg(rate(ceph_pool_rd_bytes{pool_id=~\"[[pool_id]]\"}[$rate_interval]) * on(pool_id,instance) group_left(name) ceph_pool_metadata{pool_id=~\"[[pool_id]]\"}) without (instance)",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "{{name}}",
@@ -169,7 +254,7 @@
       "thresholds": [],
       "timeFrom": null,
       "timeShift": null,
-      "title": "Client Throughput by Pool",
+      "title": "Read Throughput by Pool",
       "tooltip": {
         "shared": true,
         "sort": 0,
@@ -185,7 +270,93 @@
       },
       "yaxes": [
         {
-          "format": "decbytes",
+          "format": "Bps",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": "0",
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": false
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": null,
+      "fill": 2,
+      "gridPos": {
+        "h": 7,
+        "w": 12,
+        "x": 12,
+        "y": 8
+      },
+      "id": 17,
+      "legend": {
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "minSpan": 12,
+      "nullPointMode": "null",
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "avg(rate(ceph_pool_wr_bytes{pool_id=~\"[[pool_id]]\"}[$rate_interval]) * on(pool_id,instance) group_left(name) ceph_pool_metadata{pool_id=~\"[[pool_id]]\"}) without (instance)",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "{{name}}",
+          "refId": "A",
+          "textEditor": true
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeShift": null,
+      "title": "Write Throughput by Pool",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "Bps",
           "label": null,
           "logBase": 1,
           "max": null,
@@ -212,7 +383,7 @@
         "h": 1,
         "w": 24,
         "x": 0,
-        "y": 14
+        "y": 15
       },
       "id": 15,
       "panels": [],
@@ -231,9 +402,9 @@
       "fontSize": "100%",
       "gridPos": {
         "h": 7,
-        "w": 8,
+        "w": 12,
         "x": 0,
-        "y": 15
+        "y": 16
       },
       "id": 3,
       "links": [],
@@ -242,7 +413,7 @@
       "scroll": true,
       "showHeader": true,
       "sort": {
-        "col": 6,
+        "col": 7,
         "desc": true
       },
       "styles": [
@@ -337,7 +508,7 @@
           "unit": "short"
         },
         {
-          "alias": "IOPS (R+W)",
+          "alias": "Read IOPS",
           "colorMode": null,
           "colors": [
             "rgba(245, 54, 54, 0.9)",
@@ -349,12 +520,12 @@
           "pattern": "Value",
           "thresholds": [],
           "type": "number",
-          "unit": "none"
+          "unit": "rps"
         }
       ],
       "targets": [
         {
-          "expr": "topk(5,(label_replace((irate(ceph_pool_rd{pool_id=~\"[[pool_id]]\"}[1m]) + irate(ceph_pool_wr{pool_id=~\"[[pool_id]]\"}[1m])),\"id\", \"$1\", \"pool_id\", \"(.*)\") + on(pool_id) group_left(instance,name) ceph_pool_metadata{pool_id=~\"[[pool_id]]\"}) )",
+          "expr": "topk(5,avg(rate(ceph_pool_rd{pool_id=~\"[[pool_id]]\"}[$rate_interval]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{pool_id=~\"[[pool_id]]\"}) without (instance))",
           "format": "table",
           "instant": true,
           "intervalFactor": 2,
@@ -362,7 +533,7 @@
           "textEditor": true
         }
       ],
-      "title": "Top 5 Pools by Client IOPS",
+      "title": "Top 5 Pools by Read IOPS",
       "transform": "table",
       "type": "table"
     },
@@ -377,26 +548,35 @@
       "fontSize": "100%",
       "gridPos": {
         "h": 7,
-        "w": 8,
-        "x": 8,
-        "y": 15
+        "w": 12,
+        "x": 12,
+        "y": 16
       },
-      "id": 4,
+      "id": 20,
       "links": [],
       "minSpan": 12,
       "pageSize": null,
       "scroll": true,
       "showHeader": true,
       "sort": {
-        "col": 6,
+        "col": 7,
         "desc": true
       },
       "styles": [
         {
-          "alias": "Time",
+          "alias": "",
+          "colorMode": null,
+          "colors": [
+            "rgba(245, 54, 54, 0.9)",
+            "rgba(237, 129, 40, 0.89)",
+            "rgba(50, 172, 45, 0.97)"
+          ],
           "dateFormat": "YYYY-MM-DD HH:mm:ss",
+          "decimals": 2,
           "pattern": "Time",
-          "type": "hidden"
+          "thresholds": [],
+          "type": "hidden",
+          "unit": "short"
         },
         {
           "alias": "",
@@ -474,7 +654,63 @@
           "unit": "short"
         },
         {
-          "alias": "Throughput",
+          "alias": "Write IOPS",
+          "colorMode": null,
+          "colors": [
+            "rgba(245, 54, 54, 0.9)",
+            "rgba(237, 129, 40, 0.89)",
+            "rgba(50, 172, 45, 0.97)"
+          ],
+          "dateFormat": "YYYY-MM-DD HH:mm:ss",
+          "decimals": 0,
+          "pattern": "Value",
+          "thresholds": [],
+          "type": "number",
+          "unit": "wps"
+        }
+      ],
+      "targets": [
+        {
+          "expr": "topk(5,avg(rate(ceph_pool_wr{pool_id=~\"[[pool_id]]\"}[$rate_interval]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{pool_id=~\"[[pool_id]]\"}) without (instance))",
+          "format": "table",
+          "instant": true,
+          "intervalFactor": 2,
+          "refId": "A",
+          "textEditor": true
+        }
+      ],
+      "title": "Top 5 Pools by Write IOPS",
+      "transform": "table",
+      "type": "table"
+    },
+    {
+      "columns": [
+        {
+          "text": "Current",
+          "value": "current"
+        }
+      ],
+      "datasource": null,
+      "fontSize": "100%",
+      "gridPos": {
+        "h": 7,
+        "w": 12,
+        "x": 0,
+        "y": 23
+      },
+      "id": 21,
+      "links": [],
+      "minSpan": 12,
+      "pageSize": null,
+      "scroll": true,
+      "showHeader": true,
+      "sort": {
+        "col": 7,
+        "desc": true
+      },
+      "styles": [
+        {
+          "alias": "",
           "colorMode": null,
           "colors": [
             "rgba(245, 54, 54, 0.9)",
@@ -483,15 +719,105 @@
           ],
           "dateFormat": "YYYY-MM-DD HH:mm:ss",
           "decimals": 2,
+          "pattern": "Time",
+          "thresholds": [],
+          "type": "hidden",
+          "unit": "short"
+        },
+        {
+          "alias": "",
+          "colorMode": null,
+          "colors": [
+            "rgba(245, 54, 54, 0.9)",
+            "rgba(237, 129, 40, 0.89)",
+            "rgba(50, 172, 45, 0.97)"
+          ],
+          "dateFormat": "YYYY-MM-DD HH:mm:ss",
+          "decimals": 2,
+          "pattern": "id",
+          "thresholds": [],
+          "type": "hidden",
+          "unit": "short"
+        },
+        {
+          "alias": "",
+          "colorMode": null,
+          "colors": [
+            "rgba(245, 54, 54, 0.9)",
+            "rgba(237, 129, 40, 0.89)",
+            "rgba(50, 172, 45, 0.97)"
+          ],
+          "dateFormat": "YYYY-MM-DD HH:mm:ss",
+          "decimals": 2,
+          "pattern": "instance",
+          "thresholds": [],
+          "type": "hidden",
+          "unit": "short"
+        },
+        {
+          "alias": "",
+          "colorMode": null,
+          "colors": [
+            "rgba(245, 54, 54, 0.9)",
+            "rgba(237, 129, 40, 0.89)",
+            "rgba(50, 172, 45, 0.97)"
+          ],
+          "dateFormat": "YYYY-MM-DD HH:mm:ss",
+          "decimals": 2,
+          "pattern": "job",
+          "thresholds": [],
+          "type": "hidden",
+          "unit": "short"
+        },
+        {
+          "alias": "Pool Name",
+          "colorMode": null,
+          "colors": [
+            "rgba(245, 54, 54, 0.9)",
+            "rgba(237, 129, 40, 0.89)",
+            "rgba(50, 172, 45, 0.97)"
+          ],
+          "dateFormat": "YYYY-MM-DD HH:mm:ss",
+          "decimals": 2,
+          "pattern": "name",
+          "thresholds": [],
+          "type": "number",
+          "unit": "short"
+        },
+        {
+          "alias": "Pool ID",
+          "colorMode": null,
+          "colors": [
+            "rgba(245, 54, 54, 0.9)",
+            "rgba(237, 129, 40, 0.89)",
+            "rgba(50, 172, 45, 0.97)"
+          ],
+          "dateFormat": "YYYY-MM-DD HH:mm:ss",
+          "decimals": 2,
+          "pattern": "pool_id",
+          "thresholds": [],
+          "type": "number",
+          "unit": "short"
+        },
+        {
+          "alias": "Read Throughput",
+          "colorMode": null,
+          "colors": [
+            "rgba(245, 54, 54, 0.9)",
+            "rgba(237, 129, 40, 0.89)",
+            "rgba(50, 172, 45, 0.97)"
+          ],
+          "dateFormat": "YYYY-MM-DD HH:mm:ss",
+          "decimals": 0,
           "pattern": "Value",
           "thresholds": [],
           "type": "number",
-          "unit": "decbytes"
+          "unit": "Bps"
         }
       ],
       "targets": [
         {
-          "expr": "(label_replace((irate(ceph_pool_rd_bytes{pool_id=~\"[[pool_id]]\"}[1m]) + irate(ceph_pool_wr_bytes{pool_id=~\"[[pool_id]]\"}[1m])),\"id\", \"$1\", \"pool_id\", \"(.*)\") + on(pool_id) group_left(instance,name) ceph_pool_metadata{pool_id=~\"[[pool_id]]\"}) ",
+          "expr": "topk(5,avg(rate(ceph_pool_rd_bytes{pool_id=~\"[[pool_id]]\"}[$rate_interval]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{pool_id=~\"[[pool_id]]\"}) without (instance))",
           "format": "table",
           "instant": true,
           "intervalFactor": 2,
@@ -499,7 +825,153 @@
           "textEditor": true
         }
       ],
-      "title": "Top 5 Pools by Throughput",
+      "title": "Top 5 Pools by Read Throughput",
+      "transform": "table",
+      "type": "table"
+    },
+    {
+      "columns": [
+        {
+          "text": "Current",
+          "value": "current"
+        }
+      ],
+      "datasource": null,
+      "fontSize": "100%",
+      "gridPos": {
+        "h": 7,
+        "w": 12,
+        "x": 12,
+        "y": 23
+      },
+      "id": 22,
+      "links": [],
+      "minSpan": 12,
+      "pageSize": null,
+      "scroll": true,
+      "showHeader": true,
+      "sort": {
+        "col": 7,
+        "desc": true
+      },
+      "styles": [
+        {
+          "alias": "",
+          "colorMode": null,
+          "colors": [
+            "rgba(245, 54, 54, 0.9)",
+            "rgba(237, 129, 40, 0.89)",
+            "rgba(50, 172, 45, 0.97)"
+          ],
+          "dateFormat": "YYYY-MM-DD HH:mm:ss",
+          "decimals": 2,
+          "pattern": "Time",
+          "thresholds": [],
+          "type": "hidden",
+          "unit": "short"
+        },
+        {
+          "alias": "",
+          "colorMode": null,
+          "colors": [
+            "rgba(245, 54, 54, 0.9)",
+            "rgba(237, 129, 40, 0.89)",
+            "rgba(50, 172, 45, 0.97)"
+          ],
+          "dateFormat": "YYYY-MM-DD HH:mm:ss",
+          "decimals": 2,
+          "pattern": "id",
+          "thresholds": [],
+          "type": "hidden",
+          "unit": "short"
+        },
+        {
+          "alias": "",
+          "colorMode": null,
+          "colors": [
+            "rgba(245, 54, 54, 0.9)",
+            "rgba(237, 129, 40, 0.89)",
+            "rgba(50, 172, 45, 0.97)"
+          ],
+          "dateFormat": "YYYY-MM-DD HH:mm:ss",
+          "decimals": 2,
+          "pattern": "instance",
+          "thresholds": [],
+          "type": "hidden",
+          "unit": "short"
+        },
+        {
+          "alias": "",
+          "colorMode": null,
+          "colors": [
+            "rgba(245, 54, 54, 0.9)",
+            "rgba(237, 129, 40, 0.89)",
+            "rgba(50, 172, 45, 0.97)"
+          ],
+          "dateFormat": "YYYY-MM-DD HH:mm:ss",
+          "decimals": 2,
+          "pattern": "job",
+          "thresholds": [],
+          "type": "hidden",
+          "unit": "short"
+        },
+        {
+          "alias": "Pool Name",
+          "colorMode": null,
+          "colors": [
+            "rgba(245, 54, 54, 0.9)",
+            "rgba(237, 129, 40, 0.89)",
+            "rgba(50, 172, 45, 0.97)"
+          ],
+          "dateFormat": "YYYY-MM-DD HH:mm:ss",
+          "decimals": 2,
+          "pattern": "name",
+          "thresholds": [],
+          "type": "number",
+          "unit": "short"
+        },
+        {
+          "alias": "Pool ID",
+          "colorMode": null,
+          "colors": [
+            "rgba(245, 54, 54, 0.9)",
+            "rgba(237, 129, 40, 0.89)",
+            "rgba(50, 172, 45, 0.97)"
+          ],
+          "dateFormat": "YYYY-MM-DD HH:mm:ss",
+          "decimals": 2,
+          "pattern": "pool_id",
+          "thresholds": [],
+          "type": "number",
+          "unit": "short"
+        },
+        {
+          "alias": "Write Throughput",
+          "colorMode": null,
+          "colors": [
+            "rgba(245, 54, 54, 0.9)",
+            "rgba(237, 129, 40, 0.89)",
+            "rgba(50, 172, 45, 0.97)"
+          ],
+          "dateFormat": "YYYY-MM-DD HH:mm:ss",
+          "decimals": 0,
+          "pattern": "Value",
+          "thresholds": [],
+          "type": "number",
+          "unit": "Bps"
+        }
+      ],
+      "targets": [
+        {
+          "expr": "topk(5,avg(rate(ceph_pool_wr_bytes{pool_id=~\"[[pool_id]]\"}[$rate_interval]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{pool_id=~\"[[pool_id]]\"}) without (instance))",
+          "format": "table",
+          "instant": true,
+          "intervalFactor": 2,
+          "refId": "A",
+          "textEditor": true
+        }
+      ],
+      "title": "Top 5 Pools by Write Throughput",
       "transform": "table",
       "type": "table"
     },
@@ -509,9 +981,9 @@
       "fontSize": "100%",
       "gridPos": {
         "h": 7,
-        "w": 8,
-        "x": 16,
-        "y": 15
+        "w": 12,
+        "x": 0,
+        "y": 30
       },
       "id": 5,
       "links": [],
@@ -520,7 +992,7 @@
       "scroll": true,
       "showHeader": true,
       "sort": {
-        "col": 5,
+        "col": 6,
         "desc": true
       },
       "styles": [
@@ -620,7 +1092,7 @@
       ],
       "targets": [
         {
-          "expr": "topk(5,((ceph_pool_bytes_used / (ceph_pool_bytes_used + ceph_pool_max_avail)) * on(pool_id) group_left(name) ceph_pool_metadata))",
+          "expr": "topk(5,avg((ceph_pool_bytes_used / (ceph_pool_bytes_used + ceph_pool_max_avail)) * on(pool_id) group_left(name) ceph_pool_metadata) without (instance))",
           "format": "table",
           "hide": false,
           "instant": true,
@@ -690,6 +1162,49 @@
         "tagsQuery": "",
         "type": "query",
         "useTags": false
+      },
+      {
+        "allValue": null,
+        "current": {
+          "tags": [],
+          "text": "3m",
+          "value": "3m"
+        },
+        "hide": 0,
+        "includeAll": false,
+        "label": "rate_interval",
+        "multi": false,
+        "name": "rate_interval",
+        "options": [
+          {
+            "selected": false,
+            "text": "1m",
+            "value": "1m"
+          },
+          {
+            "selected": true,
+            "text": "3m",
+            "value": "3m"
+          },
+          {
+            "selected": false,
+            "text": "5m",
+            "value": "5m"
+          },
+          {
+            "selected": false,
+            "text": "10m",
+            "value": "10m"
+          },
+          {
+            "selected": false,
+            "text": "15m",
+            "value": "15m"
+          }
+        ],
+        "query": "1m,3m,5m,10m,15m",
+        "skipUrlSync": false,
+        "type": "custom"
       }
     ]
   },
@@ -725,6 +1240,6 @@
   },
   "timezone": "browser",
   "title": "Ceph Pools Overview",
-  "version": 11
+  "version": 12
 }
 {%- endraw %}
diff --git a/ceph/files/grafana_dashboards/radosgw-detail_prometheus.json b/ceph/files/grafana_dashboards/radosgw-detail_prometheus.json
index fa95510..c536351 100644
--- a/ceph/files/grafana_dashboards/radosgw-detail_prometheus.json
+++ b/ceph/files/grafana_dashboards/radosgw-detail_prometheus.json
@@ -71,14 +71,14 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "rate(ceph_rgw_get_initial_lat_sum{ceph_daemon=~\"($rgw_servers)\"}[1m]) / rate(ceph_rgw_get_initial_lat_count{ceph_daemon=~\"($rgw_servers)\"}[1m])",
+          "expr": "rate(ceph_rgw_get_initial_lat_sum{ceph_daemon=~\"($rgw_servers)\"}[$rate_interval]) / rate(ceph_rgw_get_initial_lat_count{ceph_daemon=~\"($rgw_servers)\"}[$rate_interval])",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "GET {{ceph_daemon}}",
           "refId": "A"
         },
         {
-          "expr": "rate(ceph_rgw_put_initial_lat_sum{ceph_daemon=~\"($rgw_servers)\"}[1m]) / rate(ceph_rgw_put_initial_lat_count{ceph_daemon=~\"($rgw_servers)\"}[1m])",
+          "expr": "rate(ceph_rgw_put_initial_lat_sum{ceph_daemon=~\"($rgw_servers)\"}[$rate_interval]) / rate(ceph_rgw_put_initial_lat_count{ceph_daemon=~\"($rgw_servers)\"}[$rate_interval])",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "PUT {{ceph_daemon}}",
@@ -162,14 +162,14 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "rate(ceph_rgw_get_b{ceph_daemon=~\"[[rgw_servers]]\"}[1m])",
+          "expr": "rate(ceph_rgw_get_b{ceph_daemon=~\"[[rgw_servers]]\"}[$rate_interval])",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "GETs {{ceph_daemon}}",
           "refId": "B"
         },
         {
-          "expr": "rate(ceph_rgw_put_b{ceph_daemon=~\"[[rgw_servers]]\"}[1m])",
+          "expr": "rate(ceph_rgw_put_b{ceph_daemon=~\"[[rgw_servers]]\"}[$rate_interval])",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "PUTs {{ceph_daemon}}",
@@ -260,28 +260,28 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "rate(ceph_rgw_failed_req{ceph_daemon=~\"[[rgw_servers]]\"}[1m])",
+          "expr": "rate(ceph_rgw_failed_req{ceph_daemon=~\"[[rgw_servers]]\"}[$rate_interval])",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Requests Failed {{ceph_daemon}}",
           "refId": "B"
         },
         {
-          "expr": "rate(ceph_rgw_get{ceph_daemon=~\"[[rgw_servers]]\"}[1m])",
+          "expr": "rate(ceph_rgw_get{ceph_daemon=~\"[[rgw_servers]]\"}[$rate_interval])",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "GETs  {{ceph_daemon}}",
           "refId": "C"
         },
         {
-          "expr": "rate(ceph_rgw_put{ceph_daemon=~\"[[rgw_servers]]\"}[1m])",
+          "expr": "rate(ceph_rgw_put{ceph_daemon=~\"[[rgw_servers]]\"}[$rate_interval])",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "PUTs  {{ceph_daemon}}",
           "refId": "D"
         },
         {
-          "expr": "rate(ceph_rgw_req{ceph_daemon=~\"[[rgw_servers]]\"}[1m]) -\n  (rate(ceph_rgw_get{ceph_daemon=~\"[[rgw_servers]]\"}[1m]) +\n   rate(ceph_rgw_put{ceph_daemon=~\"[[rgw_servers]]\"}[1m]))",
+          "expr": "rate(ceph_rgw_req{ceph_daemon=~\"[[rgw_servers]]\"}[$rate_interval]) -\n  (rate(ceph_rgw_get{ceph_daemon=~\"[[rgw_servers]]\"}[$rate_interval]) +\n   rate(ceph_rgw_put{ceph_daemon=~\"[[rgw_servers]]\"}[$rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Other  {{ceph_daemon}}",
@@ -361,6 +361,48 @@
         "tagsQuery": "",
         "type": "query",
         "useTags": false
+      },
+      {
+        "allValue": null,
+        "current": {
+          "text": "3m",
+          "value": "3m"
+        },
+        "hide": 0,
+        "includeAll": false,
+        "label": "rate_interval",
+        "multi": false,
+        "name": "rate_interval",
+        "options": [
+          {
+            "selected": false,
+            "text": "1m",
+            "value": "1m"
+          },
+          {
+            "selected": true,
+            "text": "3m",
+            "value": "3m"
+          },
+          {
+            "selected": false,
+            "text": "5m",
+            "value": "5m"
+          },
+          {
+            "selected": false,
+            "text": "10m",
+            "value": "10m"
+          },
+          {
+            "selected": false,
+            "text": "15m",
+            "value": "15m"
+          }
+        ],
+        "query": "1m,3m,5m,10m,15m",
+        "skipUrlSync": false,
+        "type": "custom"
       }
     ]
   },
diff --git a/ceph/files/grafana_dashboards/radosgw-overview_prometheus.json b/ceph/files/grafana_dashboards/radosgw-overview_prometheus.json
index 1cafb7a..4a69c5c 100644
--- a/ceph/files/grafana_dashboards/radosgw-overview_prometheus.json
+++ b/ceph/files/grafana_dashboards/radosgw-overview_prometheus.json
@@ -70,14 +70,14 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "avg(rate(ceph_rgw_get_initial_lat_sum[1m]) / rate(ceph_rgw_get_initial_lat_count[1m]))",
+          "expr": "avg(rate(ceph_rgw_get_initial_lat_sum[$rate_interval]) / rate(ceph_rgw_get_initial_lat_count[$rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "GET AVG",
           "refId": "A"
         },
         {
-          "expr": "avg(rate(ceph_rgw_put_initial_lat_sum[1m]) / rate(ceph_rgw_put_initial_lat_count[1m]))",
+          "expr": "avg(rate(ceph_rgw_put_initial_lat_sum[$rate_interval]) / rate(ceph_rgw_put_initial_lat_count[$rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "PUT AVG",
@@ -161,7 +161,7 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "sum by(rgw_host) (label_replace(rate(ceph_rgw_req[1m]), \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"))",
+          "expr": "sum by(rgw_host) (label_replace(rate(ceph_rgw_req[$rate_interval]), \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "{{rgw_host}}",
@@ -247,7 +247,7 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "label_replace(rate(ceph_rgw_get_initial_lat_sum[1m]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\") / \nlabel_replace(rate(ceph_rgw_get_initial_lat_count[1m]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")",
+          "expr": "label_replace(rate(ceph_rgw_get_initial_lat_sum[$rate_interval]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\") / \nlabel_replace(rate(ceph_rgw_get_initial_lat_count[$rate_interval]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "{{rgw_host}}",
@@ -333,14 +333,14 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "sum(rate(ceph_rgw_get_b[1m]))",
+          "expr": "sum(rate(ceph_rgw_get_b[$rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "GETs",
           "refId": "A"
         },
         {
-          "expr": "sum(rate(ceph_rgw_put_b[1m]))",
+          "expr": "sum(rate(ceph_rgw_put_b[$rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "PUTs",
@@ -425,7 +425,7 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "sum by(rgw_host) (\n  (label_replace(rate(ceph_rgw_get_b[1m]), \"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")) + \n  (label_replace(rate(ceph_rgw_put_b[1m]), \"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\"))\n)",
+          "expr": "sum by(rgw_host) (\n  (label_replace(rate(ceph_rgw_get_b[$rate_interval]), \"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")) + \n  (label_replace(rate(ceph_rgw_put_b[$rate_interval]), \"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\"))\n)",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "{{rgw_host}}",
@@ -510,7 +510,7 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "label_replace(rate(ceph_rgw_put_initial_lat_sum[1m]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\") / \nlabel_replace(rate(ceph_rgw_put_initial_lat_count[1m]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")",
+          "expr": "label_replace(rate(ceph_rgw_put_initial_lat_sum[$rate_interval]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\") / \nlabel_replace(rate(ceph_rgw_put_initial_lat_count[$rate_interval]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "{{rgw_host}}",
@@ -591,6 +591,48 @@
         "tagsQuery": "",
         "type": "query",
         "useTags": false
+      },
+      {
+        "allValue": null,
+        "current": {
+          "text": "3m",
+          "value": "3m"
+        },
+        "hide": 0,
+        "includeAll": false,
+        "label": "rate_interval",
+        "multi": false,
+        "name": "rate_interval",
+        "options": [
+          {
+            "selected": false,
+            "text": "1m",
+            "value": "1m"
+          },
+          {
+            "selected": true,
+            "text": "3m",
+            "value": "3m"
+          },
+          {
+            "selected": false,
+            "text": "5m",
+            "value": "5m"
+          },
+          {
+            "selected": false,
+            "text": "10m",
+            "value": "10m"
+          },
+          {
+            "selected": false,
+            "text": "15m",
+            "value": "15m"
+          }
+        ],
+        "query": "1m,3m,5m,10m,15m",
+        "skipUrlSync": false,
+        "type": "custom"
       }
     ]
   },
diff --git a/ceph/files/luminous/ceph.conf.Debian b/ceph/files/luminous/ceph.conf.Debian
index 43cb2f7..725109f 100644
--- a/ceph/files/luminous/ceph.conf.Debian
+++ b/ceph/files/luminous/ceph.conf.Debian
@@ -89,9 +89,17 @@
 
 {%- if pillar.ceph.radosgw is defined %}
 
+{%- if radosgw.keyring_user is defined %}
+[client.{{ radosgw.keyring_user }}]
+{%- else %}
 [client.rgw.{{ grains.host }}]
+{%- endif %}
 host = {{ grains.host }}
+{%- if radosgw.keyring_path is defined %}
+keyring = {{ radosgw.keyring_path }}
+{%- else %}
 keyring = /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.rgw.{{ grains.host }}.keyring
+{%- endif %}
 rgw socket path = /tmp/radosgw-{{ grains.host }}.sock
 log file = /var/log/ceph/{{ common.get('cluster_name', 'ceph') }}-rgw-{{ grains.host }}.log
 rgw data = /var/lib/ceph/radosgw/{{ common.get('cluster_name', 'ceph') }}-rgw.{{ grains.host }}
diff --git a/ceph/map.jinja b/ceph/map.jinja
index 9533c09..1833052 100644
--- a/ceph/map.jinja
+++ b/ceph/map.jinja
@@ -46,7 +46,11 @@
   - librados2
   services:
   {%- if grains.get('init', None) == 'systemd' %}
+    {%- if pillar.get('ceph',{}).get('radosgw',{}).keyring_user is defined %}
+  - ceph-radosgw@{{ pillar.ceph.radosgw.keyring_user }}
+    {%- else %}
   - ceph-radosgw@rgw.{{ grains.host }}
+    {%- endif %}
   {%- else %}
   - radosgw-all
   {%- endif %}
diff --git a/ceph/meta/logrotate.yml b/ceph/meta/logrotate.yml
new file mode 100644
index 0000000..8f2730c
--- /dev/null
+++ b/ceph/meta/logrotate.yml
@@ -0,0 +1,19 @@
+## Default: Daily rotation with 28 rotations kept
+{%- from "ceph/map.jinja" import common, mgr, mon, osd, radosgw with context %}
+
+{%- if mgr.get('enabled', False) or mon.get('enabled', False) or osd.get('enabled', False) or radosgw.get('enabled', False) %}
+job:
+  ceph-common:
+    - files:
+        - /var/log/ceph/*.log
+      options:
+        - {{ common.get('logrotate', {}).get('interval', 'daily') }}
+        - rotate: {{ common.get('logrotate', {}).get('rotate', 28) }}
+        - compress
+        - sharedscripts
+        - missingok
+        - notifempty
+        - su root ceph
+        - postrotate:
+              killall -q -1 ceph-mon ceph-mgr ceph-mds ceph-osd ceph-fuse radosgw || true
+{%- endif %}
diff --git a/ceph/osd/init.sls b/ceph/osd/init.sls
index 0813222..881c905 100644
--- a/ceph/osd/init.sls
+++ b/ceph/osd/init.sls
@@ -2,3 +2,4 @@
 - ceph.common
 - ceph.setup.keyring
 - ceph.osd.setup
+- ceph.osd.custom
diff --git a/ceph/osd/setup.sls b/ceph/osd/setup.sls
index 5b1a832..e028575 100644
--- a/ceph/osd/setup.sls
+++ b/ceph/osd/setup.sls
@@ -1,236 +1,11 @@
 {%- from "ceph/map.jinja" import osd, common with context %}
 
-ceph_osd_packages:
-  pkg.installed:
-  - names: {{ osd.pkgs }}
-
-/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf:
-  file.managed:
-  - source: salt://ceph/files/{{ common.version }}/ceph.conf.{{ grains.os_family }}
-  - template: jinja
-  - require:
-    - pkg: ceph_osd_packages
-
-{% set ceph_version = pillar.ceph.common.version %}
-
-{%- if osd.backend is defined %}
-
-{%- for backend_name, backend in osd.backend.iteritems() %}
-
-{%- for disk in backend.disks %}
-
-{%- if disk.get('enabled', True) %}
-
-{% set dev = disk.dev %}
-
-# for uniqueness
-{% set dev_device = dev + disk.get('data_partition_prefix', '') + disk.get('data_partition', 1)|string %}
-
-#{{ dev }}{{ disk.get('data_partition', 1) }}
-
-zap_disk_{{ dev_device }}:
-  cmd.run:
-  - name: "ceph-disk zap {{ dev }}"
-  - unless: "ceph-disk list | grep {{ dev }} | grep -e 'ceph' -e 'mounted'"
-  - require:
-    - pkg: ceph_osd_packages
-    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- endif %}
-
-{%- if disk.journal is defined %}
-
-zap_disk_journal_{{ disk.journal }}_for_{{ dev_device }}:
-  cmd.run:
-  - name: "ceph-disk zap {{ disk.journal }}"
-  - unless: "ceph-disk list | grep {{ disk.journal }} | grep -e 'ceph' -e 'mounted'"
-  - require:
-    - pkg: ceph_osd_packages
-    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
-    - cmd: zap_disk_{{ dev_device }}
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- endif %}
-
+include:
+{%- if osd.zap_disks is defined and osd.zap_disks == true %}
+- ceph.osd.setup.partitioning
 {%- endif %}
-
-{%- if disk.block_db is defined %}
-
-zap_disk_blockdb_{{ disk.block_db }}_for_{{ dev_device }}:
-  cmd.run:
-  - name: "ceph-disk zap {{ disk.block_db }}"
-  - unless: "ceph-disk list | grep {{ disk.block_db }} | grep -e 'ceph' -e 'mounted'"
-  - require:
-    - pkg: ceph_osd_packages
-    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
-    - cmd: zap_disk_{{ dev_device }}
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- endif %}
-
-{%- endif %}
-
-{%- if disk.block_wal is defined %}
-
-zap_disk_blockwal_{{ disk.block_wal }}_for_{{ dev_device }}:
-  cmd.run:
-  - name: "ceph-disk zap {{ disk.block_wal }}"
-  - unless: "ceph-disk list | grep {{ disk.block_wal }} | grep -e 'ceph' -e 'mounted'"
-  - require:
-    - pkg: ceph_osd_packages
-    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
-    - cmd: zap_disk_{{ dev_device }}
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- endif %}
-
-{%- endif %}
-
-{%- set cmd = [] %}
-{%- do cmd.append('--cluster ' + common.get('cluster_name', 'ceph')) %}
-{%- do cmd.append('--cluster-uuid ' + common.fsid) %}
-{%- if disk.get('dmcrypt', False) %}
-  {%- do cmd.append('--dmcrypt') %}
-  {%- do cmd.append('--dmcrypt-key-dir ' + disk.get('dmcrypt_key_dir', '/etc/ceph/dmcrypt-keys')) %}
-{%- endif %}
-{%- if disk.lockbox_partition is defined %}
-  {%- do cmd.append('--lockbox-partition-number ' + disk.lockbox_partition|string) %}
-{%- endif %}
-{%- do cmd.append("--prepare-key /etc/ceph/" + common.get('cluster_name', 'ceph') + ".client.bootstrap-osd.keyring") %}
-{%- if disk.data_partition is defined %}
-  {%- do cmd.append('--data-partition-number ' + disk.data_partition|string) %}
-{%- endif %}
-{%- if disk.data_partition_size is defined %}
-  {%- do cmd.append('--data-partition-size ' + disk.data_partition_size|string) %}
-{%- endif %}
-{%- if backend_name == 'bluestore' %}
-  {%- do cmd.append('--bluestore') %}
-  {%- if disk.block_partition is defined %}
-    {%- do cmd.append('--block-partition-number ' + disk.block_partition|string) %}
-  {%- endif %}
-  {%- if disk.block_db is defined %}
-    {%- if disk.block_db_dmcrypt is defined and not disk.block_db_dmcrypt %}
-      {%- do cmd.append('--block-db-non-dmcrypt') %}
-    {%- elif disk.get('block_db_dmcrypt', False) %}
-      {%- do cmd.append('--block-db-dmcrypt') %}
-    {%- endif %}
-    {%- if disk.block_db_partition is defined %}
-      {%- do cmd.append('--block-db-partition-number ' + disk.block_db_partition|string) %}
-    {%- endif %}
-  {%- do cmd.append('--block.db ' + disk.block_db) %}
-  {%- endif %}
-  {%- if disk.block_wal is defined %}
-    {%- if disk.block_wal_dmcrypt is defined and not disk.block_wal_dmcrypt %}
-      {%- do cmd.append('--block-wal-non-dmcrypt') %}
-    {%- elif disk.get('block_wal_dmcrypt', False) %}
-      {%- do cmd.append('--block-wal-dmcrypt') %}
-    {%- endif %}
-    {%- if disk.block_wal_partition is defined %}
-      {%- do cmd.append('--block-wal-partition-number ' + disk.block_wal_partition|string) %}
-    {%- endif %}
-    {%- do cmd.append('--block.wal ' + disk.block_wal) %}
-  {%- endif %}
-  {%- do cmd.append(dev) %}
-{%- elif backend_name == 'filestore' and ceph_version not in ['kraken', 'jewel'] %}
-  {%- if disk.journal_dmcrypt is defined and not disk.journal_dmcrypt %}
-    {%- do cmd.append('--journal-non-dmcrypt') %}
-  {%- elif disk.get('journal_dmcrypt', False) %}
-    {%- do cmd.append('--journal-dmcrypt') %}
-  {%- endif %}
-  {%- if disk.journal_partition is defined %}
-    {%- do cmd.append('--journal-partition-number ' + disk.journal_partition|string) %}
-  {%- endif %}
-  {%- do cmd.append('--filestore') %}
-  {%- do cmd.append(dev) %}
-  {%- if disk.journal is defined %}
-    {%- do cmd.append(disk.journal) %}
-  {%- endif %}
-{%- elif backend_name == 'filestore' %}
-  {%- if disk.journal_dmcrypt is defined and not disk.journal_dmcrypt %}
-    {%- do cmd.append('--journal-non-dmcrypt') %}
-  {%- elif disk.get('journal_dmcrypt', False) %}
-    {%- do cmd.append('--journal-dmcrypt') %}
-  {%- endif %}
-  {%- if disk.journal_partition is defined %}
-    {%- do cmd.append('--journal-partition-number ' + disk.journal_partition|string) %}
-  {%- endif %}
-  {%- do cmd.append(dev) %}
-  {%- if disk.journal is defined %}
-    {%- do cmd.append(disk.journal) %}
-  {%- endif %}
-{%- endif %}
-
-prepare_disk_{{ dev_device }}:
-  cmd.run:
-  - name: "yes | ceph-disk prepare {{ cmd|join(' ') }}"
-  - unless: "ceph-disk list | grep {{ dev_device }} | grep -e 'ceph' -e 'mounted'"
-  - require:
-    - cmd: zap_disk_{{ dev_device }}
-    - pkg: ceph_osd_packages
-    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- endif %}
-
-reload_partition_table_{{ dev_device }}:
-  cmd.run:
-  - name: "partprobe"
-  - unless: "lsblk -p | grep {{ dev_device }} -A1 | grep -v lockbox | grep {{ dev_device }} | grep ceph | grep osd"
-  - require:
-    - cmd: prepare_disk_{{ dev_device }}
-    - cmd: zap_disk_{{ dev_device }}
-    - pkg: ceph_osd_packages
-    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- else %}
-  - onlyif: ceph-disk list | grep {{ dev_device }} | grep ceph
-  {%- endif %}
-
-activate_disk_{{ dev_device }}:
-  cmd.run:
-{%- if disk.get('dmcrypt', False) %}
-  - name: "ceph-disk activate --dmcrypt --activate-key /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.bootstrap-osd.keyring {{ dev_device }}"
+{%- if osd.lvm_enabled is defined and osd.lvm_enabled == true %}
+- ceph.osd.setup.lvm
 {%- else %}
-  - name: "ceph-disk activate --activate-key /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.bootstrap-osd.keyring {{ dev_device }}"
+- ceph.osd.setup.disk
 {%- endif %}
-  - unless: "lsblk -p | grep {{ dev_device }} -A1 | grep -v lockbox | grep {{ dev_device }} | grep ceph | grep osd"
-  - require:
-    - cmd: prepare_disk_{{ dev_device }}
-    - cmd: zap_disk_{{ dev_device }}
-    - pkg: ceph_osd_packages
-    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- else %}
-  - onlyif: ceph-disk list | grep {{ dev_device }} | grep ceph
-  {%- endif %}
-
-{%- endif %}
-
-{%- endfor %}
-
-{%- endfor %}
-
-{%- endif %}
-
-osd_services_global:
-  service.running:
-  - enable: true
-  - names: ['ceph-osd.target']
-  - watch:
-    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- endif %}
-
-osd_services:
-  service.running:
-  - enable: true
-  - names: ['ceph.target']
-  - watch:
-    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- endif %}
diff --git a/ceph/osd/setup/disk.sls b/ceph/osd/setup/disk.sls
new file mode 100644
index 0000000..5b1a832
--- /dev/null
+++ b/ceph/osd/setup/disk.sls
@@ -0,0 +1,236 @@
+{%- from "ceph/map.jinja" import osd, common with context %}
+
+ceph_osd_packages:
+  pkg.installed:
+  - names: {{ osd.pkgs }}
+
+/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf:
+  file.managed:
+  - source: salt://ceph/files/{{ common.version }}/ceph.conf.{{ grains.os_family }}
+  - template: jinja
+  - require:
+    - pkg: ceph_osd_packages
+
+{% set ceph_version = pillar.ceph.common.version %}
+
+{%- if osd.backend is defined %}
+
+{%- for backend_name, backend in osd.backend.iteritems() %}
+
+{%- for disk in backend.disks %}
+
+{%- if disk.get('enabled', True) %}
+
+{% set dev = disk.dev %}
+
+# for uniqueness
+{% set dev_device = dev + disk.get('data_partition_prefix', '') + disk.get('data_partition', 1)|string %}
+
+#{{ dev }}{{ disk.get('data_partition', 1) }}
+
+zap_disk_{{ dev_device }}:
+  cmd.run:
+  - name: "ceph-disk zap {{ dev }}"
+  - unless: "ceph-disk list | grep {{ dev }} | grep -e 'ceph' -e 'mounted'"
+  - require:
+    - pkg: ceph_osd_packages
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
+
+{%- if disk.journal is defined %}
+
+zap_disk_journal_{{ disk.journal }}_for_{{ dev_device }}:
+  cmd.run:
+  - name: "ceph-disk zap {{ disk.journal }}"
+  - unless: "ceph-disk list | grep {{ disk.journal }} | grep -e 'ceph' -e 'mounted'"
+  - require:
+    - pkg: ceph_osd_packages
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
+    - cmd: zap_disk_{{ dev_device }}
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
+
+{%- endif %}
+
+{%- if disk.block_db is defined %}
+
+zap_disk_blockdb_{{ disk.block_db }}_for_{{ dev_device }}:
+  cmd.run:
+  - name: "ceph-disk zap {{ disk.block_db }}"
+  - unless: "ceph-disk list | grep {{ disk.block_db }} | grep -e 'ceph' -e 'mounted'"
+  - require:
+    - pkg: ceph_osd_packages
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
+    - cmd: zap_disk_{{ dev_device }}
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
+
+{%- endif %}
+
+{%- if disk.block_wal is defined %}
+
+zap_disk_blockwal_{{ disk.block_wal }}_for_{{ dev_device }}:
+  cmd.run:
+  - name: "ceph-disk zap {{ disk.block_wal }}"
+  - unless: "ceph-disk list | grep {{ disk.block_wal }} | grep -e 'ceph' -e 'mounted'"
+  - require:
+    - pkg: ceph_osd_packages
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
+    - cmd: zap_disk_{{ dev_device }}
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
+
+{%- endif %}
+
+{%- set cmd = [] %}
+{%- do cmd.append('--cluster ' + common.get('cluster_name', 'ceph')) %}
+{%- do cmd.append('--cluster-uuid ' + common.fsid) %}
+{%- if disk.get('dmcrypt', False) %}
+  {%- do cmd.append('--dmcrypt') %}
+  {%- do cmd.append('--dmcrypt-key-dir ' + disk.get('dmcrypt_key_dir', '/etc/ceph/dmcrypt-keys')) %}
+{%- endif %}
+{%- if disk.lockbox_partition is defined %}
+  {%- do cmd.append('--lockbox-partition-number ' + disk.lockbox_partition|string) %}
+{%- endif %}
+{%- do cmd.append("--prepare-key /etc/ceph/" + common.get('cluster_name', 'ceph') + ".client.bootstrap-osd.keyring") %}
+{%- if disk.data_partition is defined %}
+  {%- do cmd.append('--data-partition-number ' + disk.data_partition|string) %}
+{%- endif %}
+{%- if disk.data_partition_size is defined %}
+  {%- do cmd.append('--data-partition-size ' + disk.data_partition_size|string) %}
+{%- endif %}
+{%- if backend_name == 'bluestore' %}
+  {%- do cmd.append('--bluestore') %}
+  {%- if disk.block_partition is defined %}
+    {%- do cmd.append('--block-partition-number ' + disk.block_partition|string) %}
+  {%- endif %}
+  {%- if disk.block_db is defined %}
+    {%- if disk.block_db_dmcrypt is defined and not disk.block_db_dmcrypt %}
+      {%- do cmd.append('--block-db-non-dmcrypt') %}
+    {%- elif disk.get('block_db_dmcrypt', False) %}
+      {%- do cmd.append('--block-db-dmcrypt') %}
+    {%- endif %}
+    {%- if disk.block_db_partition is defined %}
+      {%- do cmd.append('--block-db-partition-number ' + disk.block_db_partition|string) %}
+    {%- endif %}
+  {%- do cmd.append('--block.db ' + disk.block_db) %}
+  {%- endif %}
+  {%- if disk.block_wal is defined %}
+    {%- if disk.block_wal_dmcrypt is defined and not disk.block_wal_dmcrypt %}
+      {%- do cmd.append('--block-wal-non-dmcrypt') %}
+    {%- elif disk.get('block_wal_dmcrypt', False) %}
+      {%- do cmd.append('--block-wal-dmcrypt') %}
+    {%- endif %}
+    {%- if disk.block_wal_partition is defined %}
+      {%- do cmd.append('--block-wal-partition-number ' + disk.block_wal_partition|string) %}
+    {%- endif %}
+    {%- do cmd.append('--block.wal ' + disk.block_wal) %}
+  {%- endif %}
+  {%- do cmd.append(dev) %}
+{%- elif backend_name == 'filestore' and ceph_version not in ['kraken', 'jewel'] %}
+  {%- if disk.journal_dmcrypt is defined and not disk.journal_dmcrypt %}
+    {%- do cmd.append('--journal-non-dmcrypt') %}
+  {%- elif disk.get('journal_dmcrypt', False) %}
+    {%- do cmd.append('--journal-dmcrypt') %}
+  {%- endif %}
+  {%- if disk.journal_partition is defined %}
+    {%- do cmd.append('--journal-partition-number ' + disk.journal_partition|string) %}
+  {%- endif %}
+  {%- do cmd.append('--filestore') %}
+  {%- do cmd.append(dev) %}
+  {%- if disk.journal is defined %}
+    {%- do cmd.append(disk.journal) %}
+  {%- endif %}
+{%- elif backend_name == 'filestore' %}
+  {%- if disk.journal_dmcrypt is defined and not disk.journal_dmcrypt %}
+    {%- do cmd.append('--journal-non-dmcrypt') %}
+  {%- elif disk.get('journal_dmcrypt', False) %}
+    {%- do cmd.append('--journal-dmcrypt') %}
+  {%- endif %}
+  {%- if disk.journal_partition is defined %}
+    {%- do cmd.append('--journal-partition-number ' + disk.journal_partition|string) %}
+  {%- endif %}
+  {%- do cmd.append(dev) %}
+  {%- if disk.journal is defined %}
+    {%- do cmd.append(disk.journal) %}
+  {%- endif %}
+{%- endif %}
+
+prepare_disk_{{ dev_device }}:
+  cmd.run:
+  - name: "yes | ceph-disk prepare {{ cmd|join(' ') }}"
+  - unless: "ceph-disk list | grep {{ dev_device }} | grep -e 'ceph' -e 'mounted'"
+  - require:
+    - cmd: zap_disk_{{ dev_device }}
+    - pkg: ceph_osd_packages
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
+
+reload_partition_table_{{ dev_device }}:
+  cmd.run:
+  - name: "partprobe"
+  - unless: "lsblk -p | grep {{ dev_device }} -A1 | grep -v lockbox | grep {{ dev_device }} | grep ceph | grep osd"
+  - require:
+    - cmd: prepare_disk_{{ dev_device }}
+    - cmd: zap_disk_{{ dev_device }}
+    - pkg: ceph_osd_packages
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- else %}
+  - onlyif: ceph-disk list | grep {{ dev_device }} | grep ceph
+  {%- endif %}
+
+activate_disk_{{ dev_device }}:
+  cmd.run:
+{%- if disk.get('dmcrypt', False) %}
+  - name: "ceph-disk activate --dmcrypt --activate-key /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.bootstrap-osd.keyring {{ dev_device }}"
+{%- else %}
+  - name: "ceph-disk activate --activate-key /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.bootstrap-osd.keyring {{ dev_device }}"
+{%- endif %}
+  - unless: "lsblk -p | grep {{ dev_device }} -A1 | grep -v lockbox | grep {{ dev_device }} | grep ceph | grep osd"
+  - require:
+    - cmd: prepare_disk_{{ dev_device }}
+    - cmd: zap_disk_{{ dev_device }}
+    - pkg: ceph_osd_packages
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- else %}
+  - onlyif: ceph-disk list | grep {{ dev_device }} | grep ceph
+  {%- endif %}
+
+{%- endif %}
+
+{%- endfor %}
+
+{%- endfor %}
+
+{%- endif %}
+
+osd_services_global:
+  service.running:
+  - enable: true
+  - names: ['ceph-osd.target']
+  - watch:
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
+
+osd_services:
+  service.running:
+  - enable: true
+  - names: ['ceph.target']
+  - watch:
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
diff --git a/ceph/osd/setup/partitioning.sls b/ceph/osd/setup/partitioning.sls
new file mode 100644
index 0000000..16ced44
--- /dev/null
+++ b/ceph/osd/setup/partitioning.sls
@@ -0,0 +1,80 @@
+{%- from "ceph/map.jinja" import osd, common with context %}
+
+{%- set devs = [] %}
+{%- set dbs = [] %}
+{%- set wals = [] %}
+{%- for backend_name, backend in osd.backend.iteritems() %}
+{%- for disk in backend.disks %}
+{%- set dev = disk.dev %}
+
+{%- if disk.block_db is defined %}
+{%- set db = disk.block_db %}
+{%- do dbs.append(db) %}
+{%- endif %}
+{%- if disk.block_wal is defined %}
+{%- set wal = disk.block_wal %}
+{%- do wals.append(wal) %}
+{%- endif %}
+{%- do devs.append(dev) %}
+{%- endfor %}
+{%- endfor %}
+{%- set end_size = {} %}
+{%- set counter = {} %}
+
+{%- if dbs != [] %}
+{%- for db in dbs | unique %}
+{%- do end_size.update({db: 1048576}) %}
+{%- do counter.update({db: 1}) %}
+create_disk_label_{{ db }}:
+  module.run:
+  - name: partition.mklabel
+  - device: {{ db }}
+  - label_type: gpt
+  - unless: "fdisk -l {{ db }} | grep -i 'Disklabel type: gpt'"
+{%- endfor %}
+{%- for db in dbs %}
+create_partition_{{ db }}_{{ counter[db] }}:
+  module.run:
+  - name: partition.mkpart
+  - device: {{ db }}
+  - part_type: primary
+  - start: {{ end_size[db] }}B
+  - end: {{ end_size[db] + osd.bluestore_block_db_size }}B
+  - size: {{ osd.bluestore_block_db_size }}B
+  - unless: "blkid {{ db }}{{ counter[db] }} {{ db }}p{{ counter[db] }}"
+  - require:
+    - module: create_disk_label_{{ db }}
+
+{%- do counter.update({db: counter[db] + 1}) %}
+{%- do end_size.update({db: end_size[db] + osd.bluestore_block_db_size + 1048576}) %}
+{%- endfor %}
+{%- endif %}
+
+{%- if wals != [] %}
+{%- for wal in wals | unique %}
+{%- do end_size.update({wal: 1048576}) %}
+{%- do counter.update({wal: 1}) %}
+create_disk_label_{{ wal }}:
+  module.run:
+  - name: partition.mklabel
+  - device: {{ wal }}
+  - label_type: gpt
+  - unless: "fdisk -l {{ wal }} | grep -i 'Disklabel type: gpt'"
+{%- endfor %}
+{%- for wal in wals %}
+create_partition_{{ wal }}_{{ counter[wal] }}:
+  module.run:
+  - name: partition.mkpart
+  - device: {{ wal }}
+  - part_type: primary
+  - start: {{ end_size[wal] }}B
+  - end: {{ end_size[wal] + osd.bluestore_block_wal_size }}B
+  - size: {{ osd.bluestore_block_wal_size }}B
+  - unless: "blkid {{ wal }}{{ counter[wal] }} {{ wal }}p{{ counter[wal] }}"
+  - require:
+    - module: create_disk_label_{{ wal }}
+
+{%- do counter.update({wal: counter[wal] + 1}) %}
+{%- do end_size.update({wal: end_size[wal] + osd.bluestore_block_wal_size + 1048576}) %}
+{%- endfor %}
+{%- endif %}
diff --git a/ceph/setup/keyring.sls b/ceph/setup/keyring.sls
index f26c608..d51a109 100644
--- a/ceph/setup/keyring.sls
+++ b/ceph/setup/keyring.sls
@@ -2,18 +2,18 @@
 
 {% if not common.get('container_mode', False) %}
 
-{# run only if ceph cluster is present #}
-{%- for node_name, node_grains in salt['mine.get']('ceph:common:keyring:admin', 'grains.items', 'pillar').iteritems() %}
+  {# run only if ceph cluster is present #}
+  {%- for node_name, node_grains in salt['mine.get']('ceph:common:keyring:admin', 'grains.items', 'pillar').iteritems() %}
 
-{%- if node_grains.ceph is defined and node_grains.ceph.ceph_keyring is defined and node_grains.ceph.ceph_keyring.admin is defined and node_grains.ceph.get('fsid', '') == common.fsid %}
+    {%- if node_grains.ceph is defined and node_grains.ceph.ceph_keyring is defined and node_grains.ceph.ceph_keyring.admin is defined and node_grains.ceph.get('fsid', '') == common.fsid %}
 
-{%- if loop.index0 == 0 %}
+      {%- if loop.index0 == 0 %}
 
-{% for keyring_name, keyring in common.get('keyring', {}).iteritems() %}
+        {% for keyring_name, keyring in common.get('keyring', {}).iteritems() %}
 
-{%- if keyring.name is defined %}
+          {%- if keyring.name is defined %}
 
-{%- if keyring.name != 'admin' and keyring.key is defined and common.get("manage_keyring", False) %}
+            {%- if keyring.name != 'admin' and keyring.key is defined and common.get("manage_keyring", False) %}
 
 {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring:
   file.managed:
@@ -25,22 +25,43 @@
 
 ceph_import_keyring_{{ keyring.name }}:
   cmd.run:
-  - name: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth import -i {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring"
+  - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth import -i {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring
   - onchanges:
     - file: {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring
 
-{%- elif keyring.name != 'admin' %}
+ceph_update_caps_for_{{ keyring.name }}:
+  cmd.run:
+  - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth caps client.{{ keyring.name }} {%- for cap_name, cap in  keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %}
+  - onchanges:
+    - file: {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring
+
+            {%- elif keyring.name != 'admin' %}
 
 ceph_create_keyring_{{ keyring.name }}:
   cmd.run:
-  - name: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth get-or-create client.{{ keyring.name }} {%- for cap_name, cap in  keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %} > {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring"
+  - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth get-or-create client.{{ keyring.name }} {%- for cap_name, cap in  keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %} > {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring
   - unless: "test -f {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring"
 
-{%- endif %}
+              {%- if salt['file.file_exists']('/usr/bin/ceph') %}
+                {%- set caps = salt['cmd.shell']('ceph auth list --format json') | load_json %}
+                {%- for client in caps['auth_dump'] %}
+                  {%- if client['entity'] == "client." + keyring.name %}
+                    {%- for cap_name, cap in  client.caps.iteritems() %}
+                      {%- if cap != keyring.caps[cap_name] %}
+ceph_update_caps_for_{{ keyring.name }}_{{ cap_name }}:
+  cmd.run:
+  - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth caps client.{{ keyring.name }} {%- for cap_name, cap in  keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %}
+                      {%- endif %}
+                    {%- endfor %}
+                  {%- endif %}
+                {%- endfor %}
+              {%- endif %}
 
-{%- else %}
+            {%- endif %}
 
-{%- if keyring_name != 'admin' and keyring.key is defined and common.get("manage_keyring", False) %}
+          {%- else %}
+
+            {%- if keyring_name != 'admin' and keyring.key is defined and common.get("manage_keyring", False) %}
 
 {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring_name }}.keyring:
   file.managed:
@@ -56,23 +77,44 @@
   - onchanges:
     - file: {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring_name }}.keyring
 
-{%- elif keyring_name != 'admin' %}
+ceph_update_caps_for_{{ keyring_name }}:
+  cmd.run:
+  - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth caps client.{{ keyring_name }} {%- for cap_name, cap in  keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %}
+  - onchanges:
+    - file: {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring_name }}.keyring
+
+            {%- elif keyring_name != 'admin' %}
 
 ceph_create_keyring_{{ keyring_name }}:
   cmd.run:
-  - name: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth get-or-create client.{{ keyring_name }} {%- for cap_name, cap in  keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %} > {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring_name }}.keyring"
+  - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth get-or-create client.{{ keyring_name }} {%- for cap_name, cap in  keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %} > {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring_name }}.keyring
   - unless: "test -f {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring_name }}.keyring"
 
-{%- endif %}
+              {%- if salt['file.file_exists']('/usr/bin/ceph') %}
+                {%- set caps = salt['cmd.shell']('ceph auth list --format json') | load_json %}
+                {%- for client in caps['auth_dump'] %}
+                  {%- if client['entity'] == "client." + keyring_name %}
+                    {%- for cap_name, cap in  client.caps.iteritems() %}
+                      {%- if cap != keyring.caps[cap_name] %}
+ceph_update_caps_for_{{ keyring_name }}_{{ cap_name }}:
+  cmd.run:
+  - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth caps client.{{ keyring_name }} {%- for cap_name, cap in  keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %}
+                      {%- endif %}
+                    {%- endfor %}
+                  {%- endif %}
+                {%- endfor %}
+              {%- endif %}
 
-{%- endif %}
+            {%- endif %}
 
-{% endfor %}
+          {%- endif %}
 
-{%- endif %}
+        {% endfor %}
 
-{%- endif %}
+      {%- endif %}
 
-{%- endfor %}
+    {%- endif %}
+
+  {%- endfor %}
 
 {%- endif %}
diff --git a/metadata/service/common/cluster.yml b/metadata/service/common/cluster.yml
index 6974fe6..09ca6c2 100644
--- a/metadata/service/common/cluster.yml
+++ b/metadata/service/common/cluster.yml
@@ -7,6 +7,9 @@
     ceph_mon_node03_hostname: cmn03
   ceph:
     common:
+      config:
+        global:
+          mon_max_pg_per_osd: 600
       enabled: true
       version: ${_param:ceph_version}
       fsid: ${_param:ceph_cluster_fsid}
@@ -17,4 +20,3 @@
           host: ${_param:ceph_mon_node02_address}
         - name: ${_param:ceph_mon_node03_hostname}
           host: ${_param:ceph_mon_node03_address}
-
diff --git a/metadata/service/common/single.yml b/metadata/service/common/single.yml
index 7aec939..c81947b 100644
--- a/metadata/service/common/single.yml
+++ b/metadata/service/common/single.yml
@@ -5,6 +5,9 @@
     ceph_mon_node01_hostname: cmn01
   ceph:
     common:
+      config:
+        global:
+          mon_max_pg_per_osd: 600
       enabled: true
       version: ${_param:ceph_version}
       fsid: ${_param:ceph_cluster_fsid}
diff --git a/metadata/service/mon/cluster.yml b/metadata/service/mon/cluster.yml
index 94c484b..6a10da2 100644
--- a/metadata/service/mon/cluster.yml
+++ b/metadata/service/mon/cluster.yml
@@ -7,7 +7,3 @@
   ceph:
     mon:
       enabled: true
-    common:
-      config:
-        mon:
-          mon_max_pg_per_osd: 600
diff --git a/metadata/service/mon/single.yml b/metadata/service/mon/single.yml
index ea04ea0..8be5da5 100644
--- a/metadata/service/mon/single.yml
+++ b/metadata/service/mon/single.yml
@@ -12,9 +12,6 @@
           caps:
             mon: "allow *"
     common:
-      config:
-        mon:
-          mon_max_pg_per_osd: 600
       keyring:
         admin:
           caps:
diff --git a/tests/pillar/ceph_mon_single.sls b/tests/pillar/ceph_mon_single.sls
index 05ed121..5ad74c9 100644
--- a/tests/pillar/ceph_mon_single.sls
+++ b/tests/pillar/ceph_mon_single.sls
@@ -1,6 +1,6 @@
 ceph:
   common:
-    version: kraken
+    version: luminous
     cluster_name: ceph
     config:
       global:
@@ -27,7 +27,7 @@
           osd: "allow *"
   mon:
     enabled: true
-    version: kraken
+    version: luminous
     keyring:
       mon:
         key: AQAnQIhZ6in5KxAAdf467upoRMWFcVg5pbh1yg==
@@ -40,3 +40,24 @@
           mgr: "allow *"
           mon: "allow *"
           osd: "allow *"
+  radosgw:
+    enabled: true
+    hostname: gw.ceph.lab
+    keyring_user: radosgw.gateway
+    keyring_path: /etc/ceph/keyring.radosgw.gateway
+    bind:
+      address: 10.10.10.1
+      port: 8080
+    identity:
+      engine: keystone
+      api_version: 3
+      host: 10.10.10.100
+      port: 5000
+      user: admin
+      password: password
+      project: admin
+      domain: default
+    swift:
+      versioning:
+        enabled: true
+      enforce_content_length: true