Decapod migration support script / parametrize Ceph cluster_name

PROD-16110

Change-Id: Icdd655c2c5e1c99a28bb35410929e744a90d35c7
diff --git a/.kitchen.yml b/.kitchen.yml
index 5136ac7..ce8dabd 100644
--- a/.kitchen.yml
+++ b/.kitchen.yml
@@ -16,7 +16,7 @@
     noservices: True
   vendor_repo:
     - type: apt
-      url: http://cz.ceph.com/debian-jewel
+      url: http://download.ceph.com/debian-jewel
       key_url: https://download.ceph.com/keys/release.asc
       components: main
       distribution: xenial
diff --git a/README.rst b/README.rst
index fd9a3be..eb74d17 100644
--- a/README.rst
+++ b/README.rst
@@ -195,6 +195,7 @@
     ceph:
       common:
         version: luminous
+        cluster_name: ceph
         config:
           global:
             param1: value1
@@ -720,6 +721,19 @@
               enabled: true
               key: ssh_rsa
 
+Migration from Decapod to salt-formula-ceph
+--------------------------------------------
+
+The following configuration will run a Python script which generates the Ceph config and OSD disk mappings to be put into the cluster model.
+
+.. code-block:: yaml
+
+    ceph:
+      decapod:
+        ip: 192.168.1.10
+        user: user
+        password: psswd
+        deploy_config_name: ceph
 
 
 More information
diff --git a/_grains/ceph.py b/_grains/ceph.py
index 5df302c..61254bf 100644
--- a/_grains/ceph.py
+++ b/_grains/ceph.py
@@ -11,11 +11,17 @@
     if os.path.exists('/etc/ceph'):
         grain = {}
         grain["ceph"] = {}
+        # Derive the cluster name from the first *.conf file in /etc/ceph.
+        conf_dir = '/etc/ceph/'
+        conf_names = [f for f in os.listdir(conf_dir) if f.endswith(".conf")]
+        # Default to 'ceph' when no *.conf exists (avoids a NameError below).
+        cluster_name = conf_names[0][:-len(".conf")] if conf_names else 'ceph'
+        conf_file = conf_dir + cluster_name + '.conf'
 
         # osd
         if os.path.exists('/var/lib/ceph/osd'):
-            mount_path = check_output("df -h | awk '{print $6}' | grep ceph | grep -v lockbox | sed 's/[0-9]*//g' | awk 'NR==1{print $1}'", shell=True).rstrip()
-            sed = 'sed \'s#{0}##g\''.format(mount_path)
+            mount_path = check_output("df -h | awk '{print $6}' | grep ceph | grep -v lockbox | sed 's/-[0-9]*//g' | awk 'NR==1{print $1}'", shell=True).rstrip()
+            sed = 'sed \'s#{0}-##g\''.format(mount_path)
             cmd = "lsblk -rp | awk '{print $1,$6,$7}' | grep -v lockbox | grep ceph | " + sed
             osd_output = check_output(cmd, shell=True)
             if osd_output:
@@ -27,17 +33,18 @@
                         output = check_output("lsblk -rp | awk '{print $1,$6,$7}' | grep -B1 " + device[0], shell=True)
                         for l in output.splitlines():
                             d = l.split()
-                            dev = d[0].replace('1','')
+                            dev = re.sub(r"\d+", "", device[0])
                             encrypted = True
                             break
                     else:
                         dev = device[0].replace('1','')
+                        dev = re.sub(r"\d+", "", device[0])
                     device[0] = device[2]
                     devices[device[0]] = {}
                     devices[device[0]]['dev'] = dev
                     if encrypted:
                         devices[device[0]]['dmcrypt'] = 'true'
-                    tline = check_output("ceph osd tree | awk '{print $1,$2,$3,$4}' | grep -w 'osd." + device[0] + "'", shell=True)
+                    tline = check_output("ceph -c " + conf_file + " osd tree | awk '{print $1,$2,$3,$4}' | grep -w 'osd." + device[0] + "'", shell=True)
                     osd = tline.split()
                     if "osd" not in osd[2]:
                         crush_class = osd[1]
@@ -54,9 +61,9 @@
         keyrings = {}
         if os.path.isdir(directory):
             for filename in os.listdir(directory):
-                if filename.endswith(".keyring") and filename.startswith("ceph.client"):
+                if filename.endswith(".keyring") and re.search(r"\.client\.", filename):
                     keyring_output = open(os.path.join(directory, filename), "r")
-                    keyring_name = re.search('ceph.client.(.+?).keyring', filename).group(1)
+                    keyring_name = re.search(r'(.+?)\.client\.(.+?)\.keyring', filename).group(2)
                     if keyring_output:
                         keyrings[keyring_name] = {}
                         for line in keyring_output:
@@ -74,7 +81,7 @@
 
         # mon keyring
         hostname = check_output("hostname", shell=True).rstrip()
-        filepath = "/var/lib/ceph/mon/ceph-{0}/keyring".format(hostname)
+        filepath = "/var/lib/ceph/mon/{0}-{1}/keyring".format(cluster_name, hostname)
         if os.path.isfile(filepath):
             mon_key_output = open(filepath, "r")
             if mon_key_output:
diff --git a/ceph/common.sls b/ceph/common.sls
index 874a444..f64f810 100644
--- a/ceph/common.sls
+++ b/ceph/common.sls
@@ -6,6 +6,13 @@
   pkg.installed:
   - names: {{ common.pkgs }}
 
+/etc/default/ceph:
+  file.managed:
+  - source: salt://ceph/files/env
+  - template: jinja
+  - require:
+    - pkg: common_packages
+
 {%- endif %}
 
 {{ common.prefix_dir }}/etc/ceph:
@@ -17,7 +24,7 @@
 
 common_config:
   file.managed:
-  - name: {{ common.prefix_dir }}/etc/ceph/ceph.conf
+  - name: {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
   - source: salt://ceph/files/{{ common.version }}/ceph.conf.{{ grains.os_family }}
   - template: jinja
   {% if not common.get('container_mode', False) %}
@@ -30,8 +37,8 @@
 
 ceph_create_keyring_admin:
   cmd.run:
-  - name: "ceph-authtool --create-keyring {{ common.prefix_dir }}/etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin {%- for cap_name, cap in  common.keyring.admin.caps.iteritems() %} --cap {{ cap_name }} '{{ cap }}' {%- endfor %}"
-  - unless: "test -f {{ common.prefix_dir }}/etc/ceph/ceph.client.admin.keyring"
+  - name: "ceph-authtool --create-keyring {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.admin.keyring --gen-key -n client.admin {%- for cap_name, cap in  common.keyring.admin.caps.iteritems() %} --cap {{ cap_name }} '{{ cap }}' {%- endfor %}"
+  - unless: "test -f {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.admin.keyring"
   - require:
   {% if not common.get('container_mode', False) %}
     - pkg: common_packages
@@ -46,11 +53,11 @@
 
 {%- if loop.index0 == 0 %}
 
-{{ common.prefix_dir }}/etc/ceph/ceph.client.admin.keyring:
+{{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.admin.keyring:
   file.managed:
   - source: salt://ceph/files/keyring
   - template: jinja
-  - unless: "test -f {{ common.prefix_dir }}/etc/ceph/ceph.client.admin.keyring"
+  - unless: "test -f {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.admin.keyring"
   - defaults:
       node_grains: {{ node_grains|yaml }}
   - require:
diff --git a/ceph/files/backup/ceph-backup-client-runner.sh b/ceph/files/backup/ceph-backup-client-runner.sh
index d2601eb..1dadc89 100644
--- a/ceph/files/backup/ceph-backup-client-runner.sh
+++ b/ceph/files/backup/ceph-backup-client-runner.sh
@@ -1,4 +1,4 @@
-{%- from "ceph/map.jinja" import backup, mon, osd with context -%}
+{%- from "ceph/map.jinja" import backup, mon, osd, common with context -%}
 #!/bin/bash
 # Script to backup ceph schema and create snapshot of keyspaces
 
@@ -23,10 +23,10 @@
         exit 1
     fi
 
-    if [ ! -d "$TMPDIR/ceph-$HOSTNAME" ] && [ ! -e "$TMPDIR/ceph-$HOSTNAME" ]; then
-        mkdir -p "$TMPDIR/ceph-$HOSTNAME"
+    if [ ! -d "$TMPDIR/{{ common.get('cluster_name', 'ceph') }}-$HOSTNAME" ] && [ ! -e "$TMPDIR/{{ common.get('cluster_name', 'ceph') }}-$HOSTNAME" ]; then
+        mkdir -p "$TMPDIR/{{ common.get('cluster_name', 'ceph') }}-$HOSTNAME"
     else
-        printf "Error creating temporary directory $TMPDIR/ceph-$HOSTNAME"
+        printf "Error creating temporary directory $TMPDIR/{{ common.get('cluster_name', 'ceph') }}-$HOSTNAME"
         exit 1
     fi
 
@@ -42,15 +42,15 @@
 
 {%- if osd.get('enabled', False) %}
     cp -a /etc/ceph/ $TMPDIR/
-    cp -a /var/lib/ceph/ $TMPDIR/ceph-$HOSTNAME/
+    cp -a /var/lib/ceph/ $TMPDIR/{{ common.get('cluster_name', 'ceph') }}-$HOSTNAME/
 {%- elif mon.get('enabled', False) %}
     cp -a /etc/ceph/ $TMPDIR/
     service ceph-mon@$HOSTNAME stop
-    cp -a /var/lib/ceph/ $TMPDIR/ceph-$HOSTNAME/
+    cp -a /var/lib/ceph/ $TMPDIR/{{ common.get('cluster_name', 'ceph') }}-$HOSTNAME/
     service ceph-mon@$HOSTNAME start
 {%- endif %}
 
-    tar -cvzf $BACKUPDIR/$HOSTNAME/ceph-$HOSTNAME-$TIMESTAMP.tgz $TMPDIR
+    tar -cvzf $BACKUPDIR/$HOSTNAME/{{ common.get('cluster_name', 'ceph') }}-$HOSTNAME-$TIMESTAMP.tgz $TMPDIR
     RC=$?
 
     if [ $RC -gt 0 ]; then
diff --git a/ceph/files/env b/ceph/files/env
new file mode 100644
index 0000000..d6a5e80
--- /dev/null
+++ b/ceph/files/env
@@ -0,0 +1,11 @@
+{%- from "ceph/map.jinja" import common with context %}
+# /etc/default/ceph
+#
+# Environment file for ceph daemon systemd unit files.
+#
+
+# Increase tcmalloc cache size
+TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=134217728
+
+# Ceph cluster name
+CLUSTER={{ common.get('cluster_name', 'ceph') }}
diff --git a/ceph/files/jewel/ceph.conf.Debian b/ceph/files/jewel/ceph.conf.Debian
index 1e4d5ea..6e8c3c3 100644
--- a/ceph/files/jewel/ceph.conf.Debian
+++ b/ceph/files/jewel/ceph.conf.Debian
@@ -102,10 +102,10 @@
 
 [client.rgw.{{ grains.host }}]
 host = {{ grains.host }}
-keyring = /etc/ceph/ceph.client.rgw.{{ grains.host }}.keyring
+keyring = /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.rgw.{{ grains.host }}.keyring
 rgw socket path = /tmp/radosgw-{{ grains.host }}.sock
-log file = /var/log/ceph/ceph-rgw-{{ grains.host }}.log
-rgw data = /var/lib/ceph/radosgw/ceph-rgw.{{ grains.host }}
+log file = /var/log/ceph/{{ common.get('cluster_name', 'ceph') }}-rgw-{{ grains.host }}.log
+rgw data = /var/lib/ceph/radosgw/{{ common.get('cluster_name', 'ceph') }}-rgw.{{ grains.host }}
 rgw frontends = civetweb port={{ radosgw.bind.address }}:{{ radosgw.bind.port }} num_threads={{ radosgw.threads }}
 rgw dns name = {{ radosgw.get('hostname', grains.host) }}
 
diff --git a/ceph/files/keyring b/ceph/files/keyring
index a902b2f..03b9ee3 100644
--- a/ceph/files/keyring
+++ b/ceph/files/keyring
@@ -16,7 +16,7 @@
 
 [client.{{ name }}]
     key = {{ keyring.key }}
-    {%- for cap_name, cap in  keyring.caps.iteritems() %}
+    {%- for cap_name, cap in  keyring.get("caps", {}).iteritems() %}
     caps {{ cap_name }} = "{{ cap }}"
     {%- endfor %}
 
diff --git a/ceph/files/kraken/ceph.conf.Debian b/ceph/files/kraken/ceph.conf.Debian
index 1e4d5ea..6e8c3c3 100644
--- a/ceph/files/kraken/ceph.conf.Debian
+++ b/ceph/files/kraken/ceph.conf.Debian
@@ -102,10 +102,10 @@
 
 [client.rgw.{{ grains.host }}]
 host = {{ grains.host }}
-keyring = /etc/ceph/ceph.client.rgw.{{ grains.host }}.keyring
+keyring = /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.rgw.{{ grains.host }}.keyring
 rgw socket path = /tmp/radosgw-{{ grains.host }}.sock
-log file = /var/log/ceph/ceph-rgw-{{ grains.host }}.log
-rgw data = /var/lib/ceph/radosgw/ceph-rgw.{{ grains.host }}
+log file = /var/log/ceph/{{ common.get('cluster_name', 'ceph') }}-rgw-{{ grains.host }}.log
+rgw data = /var/lib/ceph/radosgw/{{ common.get('cluster_name', 'ceph') }}-rgw.{{ grains.host }}
 rgw frontends = civetweb port={{ radosgw.bind.address }}:{{ radosgw.bind.port }} num_threads={{ radosgw.threads }}
 rgw dns name = {{ radosgw.get('hostname', grains.host) }}
 
diff --git a/ceph/files/luminous/ceph.conf.Debian b/ceph/files/luminous/ceph.conf.Debian
index 1e4d5ea..6e8c3c3 100644
--- a/ceph/files/luminous/ceph.conf.Debian
+++ b/ceph/files/luminous/ceph.conf.Debian
@@ -102,10 +102,10 @@
 
 [client.rgw.{{ grains.host }}]
 host = {{ grains.host }}
-keyring = /etc/ceph/ceph.client.rgw.{{ grains.host }}.keyring
+keyring = /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.rgw.{{ grains.host }}.keyring
 rgw socket path = /tmp/radosgw-{{ grains.host }}.sock
-log file = /var/log/ceph/ceph-rgw-{{ grains.host }}.log
-rgw data = /var/lib/ceph/radosgw/ceph-rgw.{{ grains.host }}
+log file = /var/log/ceph/{{ common.get('cluster_name', 'ceph') }}-rgw-{{ grains.host }}.log
+rgw data = /var/lib/ceph/radosgw/{{ common.get('cluster_name', 'ceph') }}-rgw.{{ grains.host }}
 rgw frontends = civetweb port={{ radosgw.bind.address }}:{{ radosgw.bind.port }} num_threads={{ radosgw.threads }}
 rgw dns name = {{ radosgw.get('hostname', grains.host) }}
 
diff --git a/ceph/files/migration b/ceph/files/migration
new file mode 100644
index 0000000..ea977df
--- /dev/null
+++ b/ceph/files/migration
@@ -0,0 +1,102 @@
+import decapodlib
+
+import sys
+import yaml
+# usage:
+# decapod_migration.py decapod_ip decapod_user decapod_pass deploy_configuration_name_in_decapod
+
+
+def decapod_api(decapod_ip, decapod_user, decapod_pass):
+    """Return an authenticated decapodlib client for the Decapod REST API."""
+    decapod_ip = "http://" + decapod_ip + ":8081"
+    client = decapodlib.Client(decapod_ip, decapod_user, decapod_pass)
+    return client
+
+
+def get_playbook_conf(playbook_name, client):
+    """Return the configuration of the named playbook deployment."""
+    for playbook in client.get_playbook_configurations()['items']:
+        if playbook['data']['name'] == playbook_name:
+            return playbook['data']['configuration']
+    # Fail with a clear error instead of a NameError when nothing matched.
+    raise KeyError('playbook configuration not found: %s' % playbook_name)
+
+
+def main():
+    """Print common/osd cluster model YAML generated from Decapod data."""
+    client = decapod_api(sys.argv[1], sys.argv[2], sys.argv[3])
+    config = get_playbook_conf(sys.argv[4], client)
+
+    common_yaml = {'classes': [
+        'system.linux.system.repo.mcp.extra',
+        'system.linux.system.repo.mcp.apt_mirantis.ubuntu',
+        'system.linux.system.repo.mcp.apt_mirantis.saltstack_2016_3',
+        'system.linux.system.repo.mcp.apt_mirantis.ceph'
+    ],
+        'parameters': {
+            'ceph': {
+                'common': {
+                    'public_network': '',
+                    'cluster_network': '',
+                    'version': '',
+                    'fsid': '',
+                    'config': {
+                        'global': {}
+                    }
+                }
+            }
+        }
+    }
+    include = 'cluster.' + config['global_vars']['cluster'].encode('ascii') + '.infra'
+    common_yaml['classes'].append(include)
+
+    osd_yaml = {'classes': [
+        'system.ceph.osd.cluster'
+    ],
+        'parameters': {
+            'ceph': {
+                'osd': {
+                    'enabled': 'True',
+                    'backend': {
+                        'filestore': {
+                            'disks': []
+                        }
+                    }
+                },
+            }
+        }
+    }
+
+    common_yaml['parameters']['ceph']['common']['version'] = config['global_vars']['ceph_stable_release'].encode('ascii')
+    common_yaml['parameters']['ceph']['common']['public_network'] = config['global_vars']['public_network'].encode('ascii')
+    common_yaml['parameters']['ceph']['common']['cluster_network'] = config['global_vars']['cluster_network'].encode('ascii')
+    common_yaml['parameters']['ceph']['common']['fsid'] = config['global_vars']['fsid'].encode('ascii')
+    include = 'cluster.' + config['global_vars']['cluster'].encode('ascii') + '.ceph' + '.common'
+    osd_yaml['classes'].append(include)
+
+    for key in config['global_vars']:
+        if "ceph_nfs" in key:
+            common_yaml['parameters']['ceph']['common']['config']['global'][key] = config['global_vars'][key]
+        if "radosgw_" in key:
+            common_yaml['parameters']['ceph']['common']['config']['global'][key] = config['global_vars'][key]
+
+    osd = config['inventory']['osds'][0]
+    disks = config['inventory']['_meta']['hostvars'][osd]['devices']
+    journals = \
+        config['inventory']['_meta']['hostvars'][osd]['raw_journal_devices']
+    for i in range(len(disks)):
+        disk = {
+            'dev': disks[i].encode('ascii'),
+            'journal': journals[i].encode('ascii')
+        }
+        osd_yaml['parameters']['ceph']['osd']['backend']['filestore']['disks'].append(disk)
+
+    common = yaml.safe_dump(common_yaml, default_flow_style=False)
+    osd = yaml.safe_dump(osd_yaml, default_flow_style=False)
+
+    print(common)
+    print(osd)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ceph/mgr.sls b/ceph/mgr.sls
index e5dbf54..58e6dba 100644
--- a/ceph/mgr.sls
+++ b/ceph/mgr.sls
@@ -5,45 +5,45 @@
 include:
 - ceph.common
 
-mon_packages:
+mgr_packages:
   pkg.installed:
   - names: {{ mgr.pkgs }}
 
-/etc/ceph/ceph.conf:
+/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf:
   file.managed:
   - source: salt://ceph/files/{{ common.version }}/ceph.conf.{{ grains.os_family }}
   - template: jinja
   - require:
-    - pkg: mon_packages
+    - pkg: mgr_packages
 
-/var/lib/ceph/mgr/ceph-{{ grains.host }}/:
+/var/lib/ceph/mgr/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/:
   file.directory:
   - template: jinja
   - user: ceph
   - group: ceph
   - require:
-    - pkg: mon_packages
+    - pkg: mgr_packages
 
 reload_systemctl_daemon:
   cmd.run:
   - name: "systemctl daemon-reload"
-  - unless: "test -f /var/lib/ceph/mgr/ceph-{{ grains.host }}/keyring"
+  - unless: "test -f /var/lib/ceph/mgr/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/keyring"
 
 ceph_create_mgr_keyring_{{ grains.host }}:
   cmd.run:
-  - name: "ceph auth get-or-create mgr.{{ grains.host }} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-{{ grains.host }}/keyring"
-  - unless: "test -f /var/lib/ceph/mgr/ceph-{{ grains.host }}/keyring"
+  - name: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf  auth get-or-create mgr.{{ grains.host }} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/keyring"
+  - unless: "test -f /var/lib/ceph/mgr/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/keyring"
   - require:
-    - file: /var/lib/ceph/mgr/ceph-{{ grains.host }}/
+    - file: /var/lib/ceph/mgr/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/
 
 mgr_services:
   service.running:
     - enable: true
     - names: [ceph-mgr@{{ grains.host }}]
     - watch:
-      - file: /etc/ceph/ceph.conf
+      - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
     - require:
-      - pkg: mon_packages
+      - pkg: mgr_packages
       - cmd: ceph_create_mgr_keyring_{{ grains.host }}
     {%- if grains.get('noservices') %}
     - onlyif: /bin/false
@@ -55,34 +55,34 @@
 
 ceph_dashboard_address:
   cmd.run:
-  - name: "ceph config-key put mgr/dashboard/{{ grains.host }}/server_addr {{ mgr.dashboard.get('host', '::') }}"
-  - unless: "ceph config-key get mgr/dashboard/{{ grains.host }}/server_addr | grep {{ mgr.dashboard.get('host', '::') }}"
+  - name: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf config-key put mgr/dashboard/{{ grains.host }}/server_addr {{ mgr.dashboard.get('host', '::') }}"
+  - unless: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf config-key get mgr/dashboard/{{ grains.host }}/server_addr | grep {{ mgr.dashboard.get('host', '::') }}"
 
 ceph_dashboard_port:
   cmd.run:
-  - name: "ceph config-key put mgr/dashboard/{{ grains.host }}/server_port {{ mgr.dashboard.get('port', '7000') }}"
-  - unless: "ceph config-key get mgr/dashboard/{{ grains.host }}/server_port | grep {{ mgr.dashboard.get('port', '7000') }}"
+  - name: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf config-key put mgr/dashboard/{{ grains.host }}/server_port {{ mgr.dashboard.get('port', '7000') }}"
+  - unless: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf config-key get mgr/dashboard/{{ grains.host }}/server_port | grep {{ mgr.dashboard.get('port', '7000') }}"
 
 ceph_restart_dashboard_plugin:
   cmd.wait:
-  - name: "ceph mgr module disable dashboard;ceph mgr module enable dashboard --force"
+  - name: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf mgr module disable dashboard;ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf mgr module enable dashboard --force"
   - watch:
       - cmd: ceph_dashboard_address
       - cmd: ceph_dashboard_port
 
 enable_ceph_dashboard:
   cmd.run:
-  - name: "ceph mgr module enable dashboard"
-  - unless: "ceph mgr module ls | grep dashboard"
+  - name: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf mgr module enable dashboard"
+  - unless: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf mgr module ls | grep dashboard"
 
 {%- else %}
 
 disable_ceph_dashboard:
   cmd.run:
-  - name: "ceph mgr module disable dashboard"
-  - onlyif: "ceph mgr module ls | grep dashboard"
+  - name: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf mgr module disable dashboard"
+  - onlyif: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf mgr module ls | grep dashboard"
   - require:
-    - file: /var/lib/ceph/mgr/ceph-{{ grains.host }}/
+    - file: /var/lib/ceph/mgr/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/
 
 {%- endif %}
 
diff --git a/ceph/migration.sls b/ceph/migration.sls
new file mode 100644
index 0000000..8c6f5ed
--- /dev/null
+++ b/ceph/migration.sls
@@ -0,0 +1,50 @@
+# This script is used only for migration from Decapod deployed Ceph envs to salt-formula-ceph managed Ceph envs.
+
+{%- if pillar.ceph.get('decapod') %}
+
+packages:
+  pkg.installed:
+  - names:
+    - git
+    - gcc
+    - libssl-dev
+    - libyaml-dev
+    - python
+    - python-pip
+    - python-setuptools
+    - python-wheel
+
+migration_script:
+  file.managed:
+  - name: /root/decapod_migration.py
+  - source: salt://ceph/files/migration
+  - require:
+    - pkg: packages
+
+git_clone_decapod:
+  cmd.run:
+  - name: "git clone -b stable-1.1 --depth 1 https://github.com/Mirantis/ceph-lcm.git /root/decapod"
+  - unless: "test -d /root/decapod"
+  - require:
+    - pkg: packages
+    - file: migration_script
+
+install_decapodlib:
+  cmd.run:
+  - name: "pip2 install /root/decapod/decapodlib"
+  - unless: "pip2 list | grep decapod"
+  - require:
+    - pkg: packages
+    - file: migration_script
+    - cmd: git_clone_decapod
+
+run_migration_script:
+  cmd.run:
+  - name: "python /root/decapod_migration.py {{ pillar.ceph.decapod.ip }} {{ pillar.ceph.decapod.user }} {{ pillar.ceph.decapod.password }} {{ pillar.ceph.decapod.deploy_config_name }}"
+  - require:
+    - pkg: packages
+    - file: migration_script
+    - cmd: git_clone_decapod
+    - cmd: install_decapodlib
+
+{%- endif %}
diff --git a/ceph/mon.sls b/ceph/mon.sls
index 262095d..a01a487 100644
--- a/ceph/mon.sls
+++ b/ceph/mon.sls
@@ -7,7 +7,7 @@
   pkg.installed:
   - names: {{ mon.pkgs }}
 
-/etc/ceph/ceph.conf:
+/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf:
   file.managed:
   - source: salt://ceph/files/{{ common.version }}/ceph.conf.{{ grains.os_family }}
   - template: jinja
@@ -16,15 +16,15 @@
 
 cluster_{{ grains.host }}_secret_key:
   cmd.run:
-  - name: "ceph-authtool --create-keyring /etc/ceph/ceph.mon.{{ grains.host }}.keyring --gen-key -n mon. --cap mon 'allow *'"
-  - unless: "test -f /etc/ceph/ceph.mon.{{ grains.host }}.keyring"
+  - name: "ceph-authtool --create-keyring /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.mon.{{ grains.host }}.keyring --gen-key -n mon. --cap mon 'allow *'"
+  - unless: "test -f /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.mon.{{ grains.host }}.keyring"
   - require:
     - pkg: mon_packages
 
 add_admin_keyring_to_mon_keyring:
   cmd.run:
-  - name: "ceph-authtool /etc/ceph/ceph.mon.{{ grains.host }}.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring"
-  - unless: "test -f /var/lib/ceph/mon/ceph-{{ grains.host }}/done"
+  - name: "ceph-authtool /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.mon.{{ grains.host }}.keyring --import-keyring /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.admin.keyring"
+  - unless: "test -f /var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/done"
   - require:
     - pkg: mon_packages
 
@@ -35,14 +35,14 @@
   - require:
     - pkg: mon_packages
 
-#/var/lib/ceph/mon/ceph-{{ grains.host }}:
+#/var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}:
 #  file.directory:
 #    - user: ceph
 #    - group: ceph
 #    - mode: 655
 #    - makedirs: True
 
-/etc/ceph/ceph.mon.{{ grains.host }}.keyring:
+/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.mon.{{ grains.host }}.keyring:
   file.managed:
   - user: ceph
   - group: ceph
@@ -52,8 +52,8 @@
 
 populate_monmap:
   cmd.run:
-  - name: "sudo -u ceph ceph-mon --mkfs -i {{ grains.host }} --monmap /tmp/monmap"
-  - unless: "test -f /var/lib/ceph/mon/ceph-{{ grains.host }}/kv_backend"
+  - name: "sudo -u ceph ceph-mon -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf --mkfs -i {{ grains.host }} --monmap /tmp/monmap"
+  - unless: "test -f /var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/kv_backend"
   - require:
     - pkg: mon_packages
 
@@ -63,14 +63,14 @@
 
 cluster_secret_key:
   cmd.run:
-  - name: "ceph-authtool --create-keyring /var/lib/ceph/mon/ceph-{{ grains.host }}/keyring --gen-key -n mon. {%- for cap_name, cap in  keyring.caps.iteritems() %} --cap {{ cap_name }} '{{ cap }}' {%- endfor %}"
-  - unless: "test -f /var/lib/ceph/mon/ceph-{{ grains.host }}/done"
+  - name: "ceph-authtool --create-keyring /var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/keyring --gen-key -n mon. {%- for cap_name, cap in  keyring.caps.iteritems() %} --cap {{ cap_name }} '{{ cap }}' {%- endfor %}"
+  - unless: "test -f /var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/done"
   - require:
     - pkg: mon_packages
 
 cluster_secret_key_flag:
   file.managed:
-  - name: /var/lib/ceph/mon/ceph-{{ grains.host }}/done
+  - name: /var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/done
   - user: ceph
   - group: ceph
   - content: { }
@@ -81,15 +81,15 @@
 
 {% endfor %}
 
-/var/lib/ceph/mon/ceph-{{ grains.host }}/keyring:
+/var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/keyring:
   file.managed:
   - source: salt://ceph/files/mon_keyring
   - template: jinja
-  - unless: "test -f /var/lib/ceph/mon/ceph-{{ grains.host }}/done"
+  - unless: "test -f /var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/done"
   - require:
     - pkg: mon_packages
 
-/var/lib/ceph/mon/ceph-{{ grains.host }}/done:
+/var/lib/ceph/mon/{{ common.get('cluster_name', 'ceph') }}-{{ grains.host }}/done:
   file.managed:
   - user: ceph
   - group: ceph
@@ -102,7 +102,7 @@
   - enable: true
   - names: [ceph-mon@{{ grains.host }}]
   - watch:
-    - file: /etc/ceph/ceph.conf
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
   - require:
     - pkg: mon_packages
   {%- if grains.get('noservices') %}
diff --git a/ceph/osd/custom.sls b/ceph/osd/custom.sls
index fb504a1..abdd6b9 100644
--- a/ceph/osd/custom.sls
+++ b/ceph/osd/custom.sls
@@ -20,8 +20,8 @@
 
 update_class_disk_{{ dev }}:
   cmd.run:
-  - name: "ceph osd crush rm-device-class osd.{{ disk_id }}; ceph osd crush set-device-class {{ disk.class }} osd.{{ disk_id }}"
-  - unless: "ceph osd tree | awk '{print $2,$4}' | grep -w osd.{{ disk_id }} | grep {{ disk.class }}"
+  - name: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf osd crush rm-device-class osd.{{ disk_id }}; ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf osd crush set-device-class {{ disk.class }} osd.{{ disk_id }}"
+  - unless: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf osd tree | awk '{print $2,$4}' | grep -w osd.{{ disk_id }} | grep {{ disk.class }}"
   {%- if grains.get('noservices') %}
   - onlyif: /bin/false
   {%- endif %}
@@ -34,8 +34,8 @@
 
 update_weight_disk_{{ dev }}:
   cmd.run:
-  - name: "ceph osd crush reweight osd.{{ disk_id }} {{ disk.weight }}"
-  - unless: "ceph osd tree | awk '{print $2,$3,$4}' | grep -w osd.{{ disk_id }} | grep {{ disk.weight }}"
+  - name: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf osd crush reweight osd.{{ disk_id }} {{ disk.weight }}"
+  - unless: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf osd tree | awk '{print $2,$3,$4}' | grep -w osd.{{ disk_id }} | grep {{ disk.weight }}"
   {%- if grains.get('noservices') %}
   - onlyif: /bin/false
   {%- endif %}
diff --git a/ceph/osd/setup.sls b/ceph/osd/setup.sls
index de58cf3..ee8db09 100644
--- a/ceph/osd/setup.sls
+++ b/ceph/osd/setup.sls
@@ -4,7 +4,7 @@
   pkg.installed:
   - names: {{ osd.pkgs }}
 
-/etc/ceph/ceph.conf:
+/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf:
   file.managed:
   - source: salt://ceph/files/{{ common.version }}/ceph.conf.{{ grains.os_family }}
   - template: jinja
@@ -34,7 +34,7 @@
   - unless: "ceph-disk list | grep {{ dev }} | grep ceph"
   - require:
     - pkg: ceph_osd_packages
-    - file: /etc/ceph/ceph.conf
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
   {%- if grains.get('noservices') %}
   - onlyif: /bin/false
   {%- endif %}
@@ -47,7 +47,7 @@
   - unless: "ceph-disk list | grep {{ disk.journal }} | grep ceph"
   - require:
     - pkg: ceph_osd_packages
-    - file: /etc/ceph/ceph.conf
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
     - cmd: zap_disk_{{ dev_device }}
   {%- if grains.get('noservices') %}
   - onlyif: /bin/false
@@ -63,7 +63,7 @@
   - unless: "ceph-disk list | grep {{ disk.block_db }} | grep ceph"
   - require:
     - pkg: ceph_osd_packages
-    - file: /etc/ceph/ceph.conf
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
     - cmd: zap_disk_{{ dev_device }}
   {%- if grains.get('noservices') %}
   - onlyif: /bin/false
@@ -79,7 +79,7 @@
   - unless: "ceph-disk list | grep {{ disk.block_wal }} | grep ceph"
   - require:
     - pkg: ceph_osd_packages
-    - file: /etc/ceph/ceph.conf
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
     - cmd: zap_disk_{{ dev_device }}
   {%- if grains.get('noservices') %}
   - onlyif: /bin/false
@@ -88,6 +88,8 @@
 {%- endif %}
 
 {%- set cmd = [] %}
+{%- do cmd.append('--cluster ' + common.get('cluster_name', 'ceph')) %}
+{%- do cmd.append('--cluster-uuid ' + common.fsid) %}
 {%- if disk.get('dmcrypt', False) %}
   {%- do cmd.append('--dmcrypt') %}
   {%- do cmd.append('--dmcrypt-key-dir ' + disk.get('dmcrypt_key_dir', '/etc/ceph/dmcrypt-keys')) %}
@@ -95,7 +97,7 @@
 {%- if disk.lockbox_partition is defined %}
   {%- do cmd.append('--lockbox-partition-number ' + disk.lockbox_partition|string) %}
 {%- endif %}
-{%- do cmd.append('--prepare-key /etc/ceph/ceph.client.bootstrap-osd.keyring') %}
+{%- do cmd.append("--prepare-key /etc/ceph/" + common.get('cluster_name', 'ceph') + ".client.bootstrap-osd.keyring") %}
 {%- if disk.data_partition is defined %}
   {%- do cmd.append('--data-partition-number ' + disk.data_partition|string) %}
 {%- endif %}
@@ -166,7 +168,7 @@
   - require:
     - cmd: zap_disk_{{ dev_device }}
     - pkg: ceph_osd_packages
-    - file: /etc/ceph/ceph.conf
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
   {%- if grains.get('noservices') %}
   - onlyif: /bin/false
   {%- endif %}
@@ -179,7 +181,7 @@
     - cmd: prepare_disk_{{ dev_device }}
     - cmd: zap_disk_{{ dev_device }}
     - pkg: ceph_osd_packages
-    - file: /etc/ceph/ceph.conf
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
   {%- if grains.get('noservices') %}
   - onlyif: /bin/false
   {%- endif %}
@@ -187,16 +189,16 @@
 activate_disk_{{ dev_device }}:
   cmd.run:
 {%- if disk.get('dmcrypt', False) %}
-  - name: "ceph-disk activate --dmcrypt --activate-key /etc/ceph/ceph.client.bootstrap-osd.keyring {{ dev_device }}"
+  - name: "ceph-disk activate --dmcrypt --activate-key /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.bootstrap-osd.keyring {{ dev_device }}"
 {%- else %}
-  - name: "ceph-disk activate --activate-key /etc/ceph/ceph.client.bootstrap-osd.keyring {{ dev_device }}"
+  - name: "ceph-disk activate --activate-key /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.bootstrap-osd.keyring {{ dev_device }}"
 {%- endif %}
   - unless: "lsblk -p | grep {{ dev_device }} -A1 | grep -v lockbox | grep ceph | grep osd"
   - require:
     - cmd: prepare_disk_{{ dev_device }}
     - cmd: zap_disk_{{ dev_device }}
     - pkg: ceph_osd_packages
-    - file: /etc/ceph/ceph.conf
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
   {%- if grains.get('noservices') %}
   - onlyif: /bin/false
   {%- endif %}
@@ -214,7 +216,7 @@
   - enable: true
   - names: ['ceph-osd.target']
   - watch:
-    - file: /etc/ceph/ceph.conf
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
   {%- if grains.get('noservices') %}
   - onlyif: /bin/false
   {%- endif %}
@@ -224,7 +226,7 @@
   - enable: true
   - names: ['ceph.target']
   - watch:
-    - file: /etc/ceph/ceph.conf
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
   {%- if grains.get('noservices') %}
   - onlyif: /bin/false
   {%- endif %}
diff --git a/ceph/radosgw.sls b/ceph/radosgw.sls
index d941039..5a3ff5d 100644
--- a/ceph/radosgw.sls
+++ b/ceph/radosgw.sls
@@ -17,10 +17,10 @@
   - require:
     - pkg: ceph_radosgw_packages
 
-/var/lib/ceph/radosgw/ceph-radosgw.gateway/done:
+/var/lib/ceph/radosgw/{{ common.get('cluster_name', 'ceph') }}-radosgw.gateway/done:
   file.touch:
   - makedirs: true
-  - unless: "test -f /var/lib/ceph/radosgw/ceph-radosgw.gateway/done"
+  - unless: "test -f /var/lib/ceph/radosgw/{{ common.get('cluster_name', 'ceph') }}-radosgw.gateway/done"
   - require:
     - pkg: ceph_radosgw_packages
 
@@ -30,7 +30,7 @@
   - enable: True
   - require:
     - pkg: ceph_radosgw_packages
-    - file: /var/lib/ceph/radosgw/ceph-radosgw.gateway/done
+    - file: /var/lib/ceph/radosgw/{{ common.get('cluster_name', 'ceph') }}-radosgw.gateway/done
   - watch:
-    - file: /etc/ceph/ceph.conf
+    - file: /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf
   {%- if grains.get('noservices') %}
diff --git a/ceph/setup/crush.sls b/ceph/setup/crush.sls
index 704208d..427ec75 100644
--- a/ceph/setup/crush.sls
+++ b/ceph/setup/crush.sls
@@ -1,4 +1,4 @@
-{%- from "ceph/map.jinja" import setup with context %}
+{%- from "ceph/map.jinja" import setup, common with context %}
 
 /etc/ceph/crushmap:
   file.managed:
@@ -15,7 +15,7 @@
 
 ceph_enforce_crush_map:
   cmd.run:
-  - name: ceph osd setcrushmap -i /etc/ceph/crushmap.compiled
+  - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf osd setcrushmap -i /etc/ceph/crushmap.compiled
   - unless: "test -f /etc/ceph/crushmap.enforced"
   - require:
     - cmd: ceph_compile_crush_map
@@ -43,15 +43,15 @@
 
 ceph_pool_option_{{ pool_name }}_crush_ruleset:
   cmd.run:
-  - name: ceph osd pool set {{ pool_name }} crush_ruleset {{ option_value }}
-  - unless: "ceph osd pool get {{ pool_name }} crush_ruleset | grep 'crush_ruleset: {{ option_value }}'"
+  - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf osd pool set {{ pool_name }} crush_ruleset {{ option_value }}
+  - unless: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf osd pool get {{ pool_name }} crush_ruleset | grep 'crush_ruleset: {{ option_value }}'"
 
 {%- else %}
 
 ceph_pool_option_{{ pool_name }}_{{ option_name }}:
   cmd.run:
-  - name: ceph osd pool set {{ pool_name }} {{ option_name }} {{ option_value }}
-  - unless: "ceph osd pool get {{ pool_name }} {{ option_name }} | grep '{{ option_name }}: {{ option_value }}'"
+  - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf osd pool set {{ pool_name }} {{ option_name }} {{ option_value }}
+  - unless: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf osd pool get {{ pool_name }} {{ option_name }} | grep '{{ option_name }}: {{ option_value }}'"
 
 {%- endif %}
 
diff --git a/ceph/setup/keyring.sls b/ceph/setup/keyring.sls
index aeafc70..b434cbd 100644
--- a/ceph/setup/keyring.sls
+++ b/ceph/setup/keyring.sls
@@ -15,7 +15,7 @@
 
 {%- if keyring.name != 'admin' and keyring.key is defined and common.get("manage_keyring", False) %}
 
-{{ common.prefix_dir }}/etc/ceph/ceph.client.{{ keyring.name }}.keyring:
+{{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring:
   file.managed:
   - source: salt://ceph/files/keyring
   - template: jinja
@@ -25,16 +25,16 @@
 
 ceph_import_keyring_{{ keyring.name }}:
   cmd.run:
-  - name: "ceph auth import -i {{ common.prefix_dir }}/etc/ceph/ceph.client.{{ keyring.name }}.keyring"
+  - name: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth import -i {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring"
   - onchanges:
-    - file: {{ common.prefix_dir }}/etc/ceph/ceph.client.{{ keyring.name }}.keyring
+    - file: {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring
 
 {%- elif keyring.name != 'admin' %}
 
 ceph_create_keyring_{{ keyring.name }}:
   cmd.run:
-  - name: "ceph auth get-or-create client.{{ keyring.name }} {%- for cap_name, cap in  keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %} > {{ common.prefix_dir }}/etc/ceph/ceph.client.{{ keyring.name }}.keyring"
-  - unless: "test -f {{ common.prefix_dir }}/etc/ceph/ceph.client.{{ keyring.name }}.keyring"
+  - name: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth get-or-create client.{{ keyring.name }} {%- for cap_name, cap in  keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %} > {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring"
+  - unless: "test -f {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring.name }}.keyring"
 
 {%- endif %}
 
@@ -42,7 +42,7 @@
 
 {%- if keyring_name != 'admin' and keyring.key is defined and common.get("manage_keyring", False) %}
 
-{{ common.prefix_dir }}/etc/ceph/ceph.client.{{ keyring_name }}.keyring:
+{{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring_name }}.keyring:
   file.managed:
   - source: salt://ceph/files/keyring
   - template: jinja
@@ -52,16 +52,16 @@
 
 ceph_import_keyring_{{ keyring_name }}:
   cmd.run:
-  - name: "ceph auth import -i {{ common.prefix_dir }}/etc/ceph/ceph.client.{{ keyring_name }}.keyring"
+  - name: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth import -i {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring_name }}.keyring"
   - onchanges:
-    - file: {{ common.prefix_dir }}/etc/ceph/ceph.client.{{ keyring_name }}.keyring
+    - file: {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring_name }}.keyring
 
 {%- elif keyring_name != 'admin' %}
 
 ceph_create_keyring_{{ keyring_name }}:
   cmd.run:
-  - name: "ceph auth get-or-create client.{{ keyring_name }} {%- for cap_name, cap in  keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %} > {{ common.prefix_dir }}/etc/ceph/ceph.client.{{ keyring_name }}.keyring"
-  - unless: "test -f {{ common.prefix_dir }}/etc/ceph/ceph.client.{{ keyring_name }}.keyring"
+  - name: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf auth get-or-create client.{{ keyring_name }} {%- for cap_name, cap in  keyring.caps.iteritems() %} {{ cap_name }} '{{ cap }}' {%- endfor %} > {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring_name }}.keyring"
+  - unless: "test -f {{ common.prefix_dir }}/etc/ceph/{{ common.get('cluster_name', 'ceph') }}.client.{{ keyring_name }}.keyring"
 
 {%- endif %}
 
diff --git a/ceph/setup/pool.sls b/ceph/setup/pool.sls
index 35c7a08..984e540 100644
--- a/ceph/setup/pool.sls
+++ b/ceph/setup/pool.sls
@@ -1,4 +1,4 @@
-{%- from "ceph/map.jinja" import setup with context %}
+{%- from "ceph/map.jinja" import setup, common with context %}
 
 {% set ceph_version = pillar.ceph.common.version %}
 
@@ -6,15 +6,15 @@
 
 ceph_pool_create_{{ pool_name }}:
   cmd.run:
-  - name: ceph osd pool create {{ pool_name }} {{ pool.pg_num }}{% if pool.pgp_num is defined %} {{ pool.pgp_num }}{% endif %} {{ pool.type }}{% if pool.erasure_code_profile is defined %} {{ pool.erasure_code_profile }}{% endif %}{% if pool.crush_ruleset_name is defined %} {{ pool.crush_ruleset_name }}{% endif %}{% if pool.expected_num_objects is defined %} {{ pool.expected_num_objects }}{% endif %}
-  - unless: "ceph osd pool ls | grep \"^{{ pool_name }}$\""
+  - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf osd pool create {{ pool_name }} {{ pool.pg_num }}{% if pool.pgp_num is defined %} {{ pool.pgp_num }}{% endif %} {{ pool.type }}{% if pool.erasure_code_profile is defined %} {{ pool.erasure_code_profile }}{% endif %}{% if pool.crush_ruleset_name is defined %} {{ pool.crush_ruleset_name }}{% endif %}{% if pool.expected_num_objects is defined %} {{ pool.expected_num_objects }}{% endif %}
+  - unless: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf osd pool ls | grep \"^{{ pool_name }}$\""
 
 {# We need to ensure pg_num is applied first #}
 {%- if pool.get('pg_num') %}
 ceph_pool_option_{{ pool_name }}_pg_num_first:
   cmd.run:
-  - name: ceph osd pool set {{ pool_name }} pg_num {{ pool.get('pg_num') }}
-  - unless: "ceph osd pool get {{ pool_name }} pg_num | grep 'pg_num: {{ pool.get('pg_num') }}'"
+  - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf osd pool set {{ pool_name }} pg_num {{ pool.get('pg_num') }}
+  - unless: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf osd pool get {{ pool_name }} pg_num | grep 'pg_num: {{ pool.get('pg_num') }}'"
 {%- endif %}
 
 {%- for option_name, option_value in pool.iteritems() %}
@@ -23,15 +23,15 @@
 
 ceph_pool_{{ pool_name }}_enable_{{ option_name }}:
   cmd.run:
-  - name: ceph osd pool {{ option_name }} enable {{ pool_name }} {{ option_value }}
-  - unless: "ceph osd pool {{ option_name }} get {{ pool_name }} | grep '{{ option_value }}'"
+  - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf osd pool {{ option_name }} enable {{ pool_name }} {{ option_value }}
+  - unless: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf osd pool {{ option_name }} get {{ pool_name }} | grep '{{ option_value }}'"
 
 {%- elif option_name not in ['type', 'pg_num', 'application', 'crush_rule'] %}
 
 ceph_pool_option_{{ pool_name }}_{{ option_name }}:
   cmd.run:
-  - name: ceph osd pool set {{ pool_name }} {{ option_name }} {{ option_value }}
-  - unless: "ceph osd pool get {{ pool_name }} {{ option_name }} | grep '{{ option_name }}: {{ option_value }}'"
+  - name: ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf osd pool set {{ pool_name }} {{ option_name }} {{ option_value }}
+  - unless: "ceph -c /etc/ceph/{{ common.get('cluster_name', 'ceph') }}.conf osd pool get {{ pool_name }} {{ option_name }} | grep '{{ option_name }}: {{ option_value }}'"
 
 {%- endif %}
 
diff --git a/tests/pillar/ceph_mon_single.sls b/tests/pillar/ceph_mon_single.sls
index a165407..05ed121 100644
--- a/tests/pillar/ceph_mon_single.sls
+++ b/tests/pillar/ceph_mon_single.sls
@@ -1,6 +1,7 @@
 ceph:
   common:
     version: kraken
+    cluster_name: ceph
     config:
       global:
         param1: value1