Merge tag '2016.4.1' into debian/unstable

2016.4.1
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..1bfce6e
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+tests/build/
+*.swp
+*.pyc
+.ropeproject
\ No newline at end of file
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 0000000..2f2761c
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=review.openstack.org
+port=29418
+project=openstack/salt-formula-cinder.git
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
new file mode 100644
index 0000000..d43d804
--- /dev/null
+++ b/CHANGELOG.rst
@@ -0,0 +1,10 @@
+cinder formula
+==============
+
+2016.4.1 (2016-04-15)
+
+- Second release
+
+0.0.1 (2015-08-03)
+
+- Initial formula setup
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..68c771a
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,176 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..fc83783
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,26 @@
+DESTDIR=/
+SALTENVDIR=/usr/share/salt-formulas/env
+RECLASSDIR=/usr/share/salt-formulas/reclass
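+# The formula name is parsed from the "name:" key in metadata.yml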
+FORMULANAME=$(shell grep name: metadata.yml|head -1|cut -d : -f 2|grep -Eo '[a-z0-9\-]*')
+
+all:
+	@echo "make install - Install into DESTDIR"
+	@echo "make test    - Run tests"
+	@echo "make clean   - Cleanup after tests run"
+
+install:
+	# Formula
+	[ -d $(DESTDIR)/$(SALTENVDIR) ] || mkdir -p $(DESTDIR)/$(SALTENVDIR)
+	cp -a $(FORMULANAME) $(DESTDIR)/$(SALTENVDIR)/
+	[ ! -d _modules ] || cp -a _modules $(DESTDIR)/$(SALTENVDIR)/
+	[ ! -d _states ] || cp -a _states $(DESTDIR)/$(SALTENVDIR)/ || true
+	# Metadata
+	[ -d $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME) ] || mkdir -p $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME)
+	cp -a metadata/service/* $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME)
+
+test:
+	[ ! -d tests ] || (cd tests; ./run_tests.sh)
+
+clean:
+	[ ! -d tests/build ] || rm -rf tests/build
+	[ ! -d build ] || rm -rf build
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..6d6eeb0
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,332 @@
+==============================
+OpenStack Cinder Block Storage
+==============================
+
+Cinder provides the block storage (volume) infrastructure for OpenStack. It was originally a Nova component called nova-volume and has been an independent project since the Folsom release.
+
+Sample pillars
+==============
+
+The new pillar structure assigns cinder-api and cinder-scheduler to the controller role and cinder-volume to the volume role.
+
+.. code-block:: yaml
+
+    cinder:
+      controller:
+        enabled: true
+        version: juno
+        default_volume_type: 7k2SaS
+        database:
+          engine: mysql
+          host: 127.0.0.1
+          port: 3306
+          name: cinder
+          user: cinder
+          password: pwd
+        identity:
+          engine: keystone
+          host: 127.0.0.1
+          port: 35357
+          tenant: service
+          user: cinder
+          password: pwd
+        message_queue:
+          engine: rabbitmq
+          host: 127.0.0.1
+          port: 5672
+          user: openstack
+          password: pwd
+          virtual_host: '/openstack'
+        backend:
+          7k2_SAS:
+            engine: storwize
+            type_name: slow-disks
+            host: 192.168.0.1
+            port: 22
+            user: username
+            password: pass
+            connection: FC/iSCSI
+            multihost: true
+            multipath: true
+            pool: SAS7K2
+
+    cinder:
+      volume:
+        enabled: true
+        version: juno
+        default_volume_type: 7k2SaS
+        database:
+          engine: mysql
+          host: 127.0.0.1
+          port: 3306
+          name: cinder
+          user: cinder
+          password: pwd
+        identity:
+          engine: keystone
+          host: 127.0.0.1
+          port: 35357
+          tenant: service
+          user: cinder
+          password: pwd
+        message_queue:
+          engine: rabbitmq
+          host: 127.0.0.1
+          port: 5672
+          user: openstack
+          password: pwd
+          virtual_host: '/openstack'
+        backend:
+          7k2_SAS:
+            engine: storwize
+            type_name: 7k2 SAS disk
+            host: 192.168.0.1
+            port: 22
+            user: username
+            password: pass
+            connection: FC/iSCSI
+            multihost: true
+            multipath: true
+            pool: SAS7K2
+
+Cinder setup with zeroing of deleted volumes
+
+.. code-block:: yaml
+
+    cinder:
+      controller:
+        enabled: true
+        wipe_method: zero
+        ...
+
+Cinder setup with shredding of deleted volumes
+
+.. code-block:: yaml
+
+    cinder:
+      controller:
+        enabled: true
+        wipe_method: shred
+        ...
+
+
+Default Cinder setup with iSCSI target
+
+.. code-block:: yaml
+
+    cinder:
+      controller:
+        enabled: true
+        version: mitaka
+        default_volume_type: lvmdriver-1
+        database:
+          engine: mysql
+          host: 127.0.0.1
+          port: 3306
+          name: cinder
+          user: cinder
+          password: pwd
+        identity:
+          engine: keystone
+          host: 127.0.0.1
+          port: 35357
+          tenant: service
+          user: cinder
+          password: pwd
+        message_queue:
+          engine: rabbitmq
+          host: 127.0.0.1
+          port: 5672
+          user: openstack
+          password: pwd
+          virtual_host: '/openstack'
+        backend:
+          lvmdriver-1:
+            engine: lvm
+            type_name: lvmdriver-1
+            volume_group: cinder-volume
+
+Cinder setup for IBM Storwize
+
+.. code-block:: yaml
+
+    cinder:
+      volume:
+        enabled: true
+        backend:
+          7k2_SAS:
+            engine: storwize
+            type_name: 7k2 SAS disk
+            host: 192.168.0.1
+            port: 22
+            user: username
+            password: pass
+            connection: FC/iSCSI
+            multihost: true
+            multipath: true
+            pool: SAS7K2
+          10k_SAS:
+            engine: storwize
+            type_name: 10k SAS disk
+            host: 192.168.0.1
+            port: 22
+            user: username
+            password: pass
+            connection: FC/iSCSI
+            multihost: true
+            multipath: true
+            pool: SAS10K
+          15k_SAS:
+            engine: storwize
+            type_name: 15k SAS
+            host: 192.168.0.1
+            port: 22
+            user: username
+            password: pass
+            connection: FC/iSCSI
+            multihost: true
+            multipath: true
+            pool: SAS15K
+
+Cinder setup with Hitachi VSP
+
+.. code-block:: yaml
+
+    cinder:
+      controller:
+        enabled: true
+        backend:
+          hus100_backend:
+            type_name: HUS100
+            backend: hus100_backend
+            engine: hitachi_vsp
+            connection: FC
+
+Cinder setup with Ceph
+
+.. code-block:: yaml
+
+    cinder:
+      controller:
+        enabled: true
+        backend:
+          ceph_backend:
+            type_name: standard-iops
+            backend: ceph_backend
+            pool: volumes
+            engine: ceph
+            user: cinder
+            secret_uuid: da74ccb7-aa59-1721-a172-0006b1aa4e3e
+            client_cinder_key: AQDOavlU6BsSJhAAnpFR906mvdgdfRqLHwu0Uw==
+
+http://ceph.com/docs/master/rbd/rbd-openstack/
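+
+The secret_uuid must match the libvirt secret under which the client_cinder_key is registered on the compute nodes; see the link above for the full Ceph/OpenStack setup.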
+
+
+Cinder setup with HP 3PAR
+
+.. code-block:: yaml
+
+    cinder:
+      controller:
+        enabled: true
+        backend:
+          hp3par_backend:
+            type_name: hp3par
+            backend: hp3par_backend
+            user: hp3paruser
+            password: something
+            url: http://10.10.10.10/api/v1
+            cpg: OpenStackCPG
+            host: 10.10.10.10
+            login: hp3paradmin
+            sanpassword: something
+            debug: True
+            snapcpg: OpenStackSNAPCPG
+
+Cinder setup with Fujitsu ETERNUS
+
+.. code-block:: yaml
+
+    cinder:
+      volume:
+        enabled: true
+        backend:
+          10kThinPro:
+            type_name: 10kThinPro
+            engine: fujitsu
+            pool: 10kThinPro
+            host: 192.168.0.1
+            port: 5988
+            user: username
+            password: pass
+            connection: FC/iSCSI
+            name: 10kThinPro
+          10k_SAS:
+            type_name: 10k_SAS
+            pool: SAS10K
+            engine: fujitsu
+            host: 192.168.0.1
+            port: 5988
+            user: username
+            password: pass
+            connection: FC/iSCSI
+            name: 10k_SAS
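+
+The Fujitsu driver reads the ETERNUS connection details from a separate XML file; the backend template points cinder_eternus_config_file at /etc/cinder/cinder_fujitsu_eternus_dx_<backend_name>.xml.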
+
+Cinder setup with IBM GPFS filesystem
+
+.. code-block:: yaml
+
+    cinder:
+      volume:
+        enabled: true
+        backend:
+          GPFS-GOLD:
+            type_name: GPFS-GOLD
+            engine: gpfs
+            mount_point: '/mnt/gpfs-openstack/cinder/gold'
+          GPFS-SILVER:
+            type_name: GPFS-SILVER
+            engine: gpfs
+            mount_point: '/mnt/gpfs-openstack/cinder/silver'
+
+Cinder setup with HP LeftHand
+
+.. code-block:: yaml
+
+    cinder:
+      volume:
+        enabled: true
+        backend:
+          HP-LeftHand:
+            type_name: normal-storage
+            engine: hp_lefthand
+            api_url: 'https://10.10.10.10:8081/lhos'
+            username: user
+            password: password
+            clustername: cluster1
+            iscsi_chap_enabled: false
+
+Extra parameters for HP LeftHand are set as extra specs on the volume type via the cinder CLI:
+
+.. code-block:: bash
+
+    cinder type-key normal-storage set hplh:data_pl=r-10-2 hplh:provisioning=full
+
+Cinder setup with SolidFire
+
+.. code-block:: yaml
+
+    cinder:
+      volume:
+        enabled: true
+        backend:
+          solidfire:
+            type_name: normal-storage
+            engine: solidfire
+            san_ip: 10.10.10.10
+            san_login: user
+            san_password: password
+            clustername: cluster1
+            sf_emulate_512: false
+
+Read more
+=========
+
+* https://wiki.openstack.org/wiki/Cinder
+* http://docs.openstack.org/juno/config-reference/content/hitachi-configuration.html
diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000..1bad316
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+2016.4.1
diff --git a/cinder/controller.sls b/cinder/controller.sls
new file mode 100644
index 0000000..a890055
--- /dev/null
+++ b/cinder/controller.sls
@@ -0,0 +1,79 @@
+{%- from "cinder/map.jinja" import controller with context %}
+{%- if controller.enabled %}
+
+include:
+- cinder.user
+
+cinder_controller_packages:
+  pkg.installed:
+  - names: {{ controller.pkgs }}
+
+/etc/cinder/cinder.conf:
+  file.managed:
+  - source: salt://cinder/files/{{ controller.version }}/cinder.conf.controller.{{ grains.os_family }}
+  - template: jinja
+  - require:
+    - pkg: cinder_controller_packages
+
+/etc/cinder/api-paste.ini:
+  file.managed:
+  - source: salt://cinder/files/{{ controller.version }}/api-paste.ini.controller.{{ grains.os_family }}
+  - template: jinja
+  - require:
+    - pkg: cinder_controller_packages
+
+cinder_controller_services:
+  service.running:
+  - names: {{ controller.services }}
+  - enable: true
+  - watch:
+    - file: /etc/cinder/cinder.conf
+    - file: /etc/cinder/api-paste.ini
+
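+{# Apply the cinder database schema migrations once the controller services are installed and running #}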
+cinder_syncdb:
+  cmd.run:
+  - name: cinder-manage db sync
+  - require:
+    - service: cinder_controller_services
+
+{# new way #}
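+{# For each pillar-defined backend, register a matching Cinder volume type
+   and pin it to that backend via the volume_backend_name extra spec. #}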
+
+{%- for backend_name, backend in controller.get('backend', {}).iteritems() %}
+
+cinder_type_create_{{ backend_name }}:
+  cmd.run:
+  - name: "source /root/keystonerc; cinder type-create {{ backend.type_name }}"
+  - unless: "source /root/keystonerc; cinder type-list | grep {{ backend.type_name }}"
+  - require:
+    - service: cinder_controller_services
+
+cinder_type_update_{{ backend_name }}:
+  cmd.run:
+  - name: "source /root/keystonerc; cinder type-key {{ backend.type_name }} set volume_backend_name={{ backend_name }}"
+  - unless: "source /root/keystonerc; cinder extra-specs-list | grep \"{u'volume_backend_name': u'{{ backend_name }}'}\""
+  - require:
+    - cmd: cinder_type_create_{{ backend_name }}
+
+{%- endfor %}
+
+{# old way #}
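+{# Legacy syntax: volume types passed as a flat 'types' list in the pillar. #}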
+
+{% for type in controller.get('types', []) %}
+
+cinder_type_create_{{ type.name }}:
+  cmd.run:
+  - name: "source /root/keystonerc; cinder type-create {{ type.name }}"
+  - unless: "source /root/keystonerc; cinder type-list | grep {{ type.name }}"
+  - require:
+    - service: cinder_controller_services
+
+cinder_type_update_{{ type.name }}:
+  cmd.run:
+  - name: "source /root/keystonerc; cinder type-key {{ type.name }} set volume_backend_name={{ type.get('backend', type.name) }}"
+  - unless: "source /root/keystonerc; cinder extra-specs-list | grep \"{u'volume_backend_name': u'{{ type.get('backend', type.name) }}'}\""
+  - require:
+    - cmd: cinder_type_create_{{ type.name }}
+
+{% endfor %}
+
+{%- endif %}
diff --git a/cinder/files/backend/_ceph.conf b/cinder/files/backend/_ceph.conf
new file mode 100644
index 0000000..42151b0
--- /dev/null
+++ b/cinder/files/backend/_ceph.conf
@@ -0,0 +1,47 @@
+
+[{{ backend_name }}]
+volume_backend_name={{ backend_name }}
+volume_driver = cinder.volume.drivers.rbd.RBDDriver
+#
+# Options defined in cinder.volume.drivers.rbd
+#
+# The RADOS pool where rbd volumes are stored (string value)
+#rbd_pool=volumes
+rbd_pool={{ backend.pool }}
+
+# The RADOS client name for accessing rbd volumes - only set
+# when using cephx authentication (string value)
+#rbd_user=cinder
+rbd_user={{ backend.user }}
+
+# Path to the ceph configuration file (string value)
+#rbd_ceph_conf=
+rbd_ceph_conf=/etc/ceph/ceph.conf
+
+# Flatten volumes created from snapshots to remove dependency
+# from volume to snapshot (boolean value)
+#rbd_flatten_volume_from_snapshot=false
+
+# The libvirt uuid of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=da74ccb7-aa59-1721-a172-0006b1aa4e3e
+rbd_secret_uuid={{ backend.secret_uuid }}
+
+# Directory where temporary image files are stored when the
+# volume driver does not write them directly to the volume.
+# (string value)
+#volume_tmp_dir=<None>
+
+# Maximum number of nested volume clones that are taken before
+# a flatten occurs. Set to 0 to disable cloning. (integer
+# value)
+#rbd_max_clone_depth=5
+
+# Volumes will be chunked into objects of this size (in
+# megabytes). (integer value)
+#rbd_store_chunk_size=4
+
+# Timeout value (in seconds) used when connecting to ceph
+# cluster. If value < 0, no timeout is set and default
+# librados value is used. (integer value)
+#rados_connect_timeout=-1
\ No newline at end of file
diff --git a/cinder/files/backend/_fujitsu.conf b/cinder/files/backend/_fujitsu.conf
new file mode 100644
index 0000000..c9e8245
--- /dev/null
+++ b/cinder/files/backend/_fujitsu.conf
@@ -0,0 +1,5 @@
+
+[{{ backend_name }}]
+volume_backend_name={{ backend_name }}
+volume_driver=cinder.volume.drivers.fujitsu.fujitsu_eternus_dx_fc.FJDXFCDriver
+cinder_eternus_config_file=/etc/cinder/cinder_fujitsu_eternus_dx_{{ backend_name }}.xml
\ No newline at end of file
diff --git a/cinder/files/backend/_gpfs.conf b/cinder/files/backend/_gpfs.conf
new file mode 100644
index 0000000..3000c39
--- /dev/null
+++ b/cinder/files/backend/_gpfs.conf
@@ -0,0 +1,9 @@
+
+[{{ backend_name }}]
+volume_backend_name={{ backend_name }}
+volume_driver = cinder.volume.drivers.ibm.gpfs.GPFSDriver
+gpfs_mount_point_base={{ backend.mount_point }}
+#gpfs_mount_point_base=/mnt/gpfs-openstack/cinder/gold
+gpfs_max_clone_depth=3
+gpfs_sparse_volumes=true
+gpfs_storage_pool=system
\ No newline at end of file
diff --git a/cinder/files/backend/_hitachi_vsp.conf b/cinder/files/backend/_hitachi_vsp.conf
new file mode 100644
index 0000000..0f6ae75
--- /dev/null
+++ b/cinder/files/backend/_hitachi_vsp.conf
@@ -0,0 +1,64 @@
+
+[{{ backend_name }}]
+
+volume_backend_name={{ backend_name }}
+volume_driver = cinder.volume.drivers.hitachi.hbsd.hbsd_fc.HBSDFCDriver
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+#hitachi_serial_number=86644
+hitachi_storage_id={{ backend.storage_id }}
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+hitachi_pool={{ backend.pool_id }}
+
+# Thin pool ID of storage system (integer value)
+hitachi_thin_pool={{ backend.thin_pool_id }}
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+hitachi_default_copy_method=THIN
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+hitachi_target_ports={{ backend.target_ports }}
+
+hitachi_compute_target_ports={{ backend.compute_target_ports }}
+
+# Range of group number (string value)
+#hitachi_group_range=
+#hitachi_group_range=
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+#JPA
+hitachi_group_request=True
+#hitachi_group_request=false
+
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+hitachi_horcm_numbers=0,1
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+#hitachi_horcm_user=openstack
+hitachi_horcm_user={{ backend.user }}
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+#hitachi_horcm_password=avg2014
+hitachi_horcm_password={{ backend.password }}
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+hitachi_horcm_add_conf=true
+
+#hitachi multipath advice
+use_multipath_for_image_xfer=false
+
+hitachi_storage_cli=HORCM
\ No newline at end of file
diff --git a/cinder/files/backend/_hp3par.conf b/cinder/files/backend/_hp3par.conf
new file mode 100644
index 0000000..f28a4ba
--- /dev/null
+++ b/cinder/files/backend/_hp3par.conf
@@ -0,0 +1,53 @@
+
+[{{ backend_name }}]
+volume_backend_name={{ backend_name }}
+hp3par_api_url={{ backend.url }}
+
+# 3PAR Super user username
+hp3par_username={{ backend.storage.user }}
+
+# 3PAR Super user password
+hp3par_password={{ backend.password }}
+
+# 3PAR CPG to use for volume creation
+hp3par_cpg={{ backend.cpg }}
+
+# IP address of SAN volume for SSH access to the array
+san_ip={{ backend.host }}
+
+# Username for SAN volume for SSH access to the array
+san_login={{ backend.login }}
+
+# Password for SAN volume for SSH access to the array
+san_password={{ backend.password }}
+
+# FIBRE CHANNEL(uncomment the next line to enable the FC driver)
+volume_driver=cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver
+
+# iSCSI (uncomment the next line to enable the iSCSI driver and
+# hp3par_iscsi_ips or iscsi_ip_address)
+#volume_driver=cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver
+
+# iSCSI multiple port configuration
+# hp3par_iscsi_ips=10.10.220.253:3261,10.10.222.234
+#hp3par_iscsi_ips=10.10.103.151
+
+# Still available for single port iSCSI configuration
+#iscsi_ip_address=10.10.103.151
+
+## OPTIONAL SETTINGS
+# Enable HTTP debugging to 3PAR
+# hp3par_debug=True
+hp3par_debug={{ backend.debug }}
+
+# Enable CHAP authentication for iSCSI connections.
+hp3par_iscsi_chap_enabled=false
+
+# The CPG to use for Snapshots for volumes. If empty hp3par_cpg will be used.
+hp3par_snap_cpg={{ backend.snapcpg }}
+
+# Time in hours to retain a snapshot. You can't delete it before this expires.
+hp3par_snapshot_retention=2
+
+# Time in hours when a snapshot expires and is deleted. This must be larger than retention.
+hp3par_snapshot_expiration=4
\ No newline at end of file
diff --git a/cinder/files/backend/_hp_lefthand.conf b/cinder/files/backend/_hp_lefthand.conf
new file mode 100644
index 0000000..12d52b6
--- /dev/null
+++ b/cinder/files/backend/_hp_lefthand.conf
@@ -0,0 +1,15 @@
+[{{ backend_name }}]
+volume_backend_name={{ backend_name }}
+hplefthand_api_url={{ backend.api_url }}
+
+hplefthand_username={{ backend.username }}
+
+hplefthand_password={{ backend.password }}
+
+hplefthand_clustername={{ backend.clustername }}
+
+volume_driver=cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver
+
+hplefthand_iscsi_chap_enabled={{ backend.iscsi_chap_enabled }}
+
+hplefthand_debug=false
diff --git a/cinder/files/backend/_lvm.conf b/cinder/files/backend/_lvm.conf
new file mode 100644
index 0000000..04a5bd8
--- /dev/null
+++ b/cinder/files/backend/_lvm.conf
@@ -0,0 +1,7 @@
+
+[{{ backend_name }}]
+volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
+volume_backend_name={{ backend_name }}
+lvm_type = default
+iscsi_helper = tgtadm
+volume_group = {{ backend.volume_group }}
\ No newline at end of file
diff --git a/cinder/files/backend/_solidfire.conf b/cinder/files/backend/_solidfire.conf
new file mode 100644
index 0000000..bc7ecd4
--- /dev/null
+++ b/cinder/files/backend/_solidfire.conf
@@ -0,0 +1,8 @@
+
+[{{ backend_name }}]
+volume_backend_name={{ backend_name }}
+san_ip={{ backend.san_ip }}
+san_login={{ backend.san_login }}
+san_password={{ backend.san_password }}
+sf_emulate_512={{ backend.sf_emulate_512 }}
+volume_driver=cinder.volume.drivers.solidfire.SolidFireDriver
\ No newline at end of file
diff --git a/cinder/files/backend/_storwize.conf b/cinder/files/backend/_storwize.conf
new file mode 100644
index 0000000..1f09fca
--- /dev/null
+++ b/cinder/files/backend/_storwize.conf
@@ -0,0 +1,15 @@
+
+[{{ backend_name }}]
+volume_driver = cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver
+volume_backend_name={{ backend_name }}
+san_ip={{ backend.host }}
+san_ssh_port={{ backend.port }}
+san_login={{ backend.user }}
+san_password={{ backend.password }}
+
+storwize_svc_volpool_name={{ backend.pool }}
+#storwize_svc_connection_protocol=iSCSI
+storwize_svc_connection_protocol={{ backend.connection }}
+#storwize_svc_iscsi_chap_enabled=true
+storwize_svc_multihost_enabled={{ backend.multihost }}
+storwize_svc_multipath_enabled={{ backend.multipath }}
\ No newline at end of file
diff --git a/cinder/files/heka.toml b/cinder/files/heka.toml
new file mode 100644
index 0000000..7a200bb
--- /dev/null
+++ b/cinder/files/heka.toml
@@ -0,0 +1,13 @@
+[logstreamer_cinder]
+type = "LogstreamerInput"
+log_directory = "/var/log/cinder"
+file_match = '(?P<Service>.+)\.log\.?(?P<Seq>\d*)$'
+differentiator = ['cinder','_','Service']
+priority = ["^Index"]
+decoder = "openstack"
+
+[openstack]
+type = "SandboxDecoder"
+filename = "lua_modules/decoders/openstack.lua"
+module_directory = "/usr/share/heka/lua_modules;/usr/share/heka/lua_modules/common"
+
diff --git a/cinder/files/iscsitarget b/cinder/files/iscsitarget
new file mode 100644
index 0000000..c69e4e5
--- /dev/null
+++ b/cinder/files/iscsitarget
@@ -0,0 +1 @@
+ISCSITARGET_ENABLE=true
\ No newline at end of file
diff --git a/cinder/files/juno/api-paste.ini.controller.Debian b/cinder/files/juno/api-paste.ini.controller.Debian
new file mode 100644
index 0000000..35693bd
--- /dev/null
+++ b/cinder/files/juno/api-paste.ini.controller.Debian
@@ -0,0 +1,74 @@
+#############
+# OpenStack #
+#############
+
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+
+[filter:request_id]
+paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+service_port=5000
+service_host={{ pillar.cinder.controller.identity.host }}
+service_protocol=http
+auth_host = {{ pillar.cinder.controller.identity.host }}
+auth_port = {{ pillar.cinder.controller.identity.port }}
+auth_protocol = http
+admin_tenant_name = {{ pillar.cinder.controller.identity.tenant }}
+admin_user = {{ pillar.cinder.controller.identity.user }}
+admin_password = {{ pillar.cinder.controller.identity.password }}
+auth_uri=http://{{ pillar.cinder.controller.identity.host }}:5000/
+
+[filter:ratelimit]
+limits=( POST, '*/servers', ^/servers, 100000, DAY );( PUT, '*', .*, 100000, MINUTE );( GET, '*changes-since*', .*changes-since.*, 100000, MINUTE );( DELETE, '*', .*, 100000, MINUTE );( POST, '*', .*, 100000, MINUTE )
+paste.filter_factory=cinder.api.v1.limits:RateLimitingMiddleware.factory
diff --git a/cinder/files/juno/api-paste.ini.controller.RedHat b/cinder/files/juno/api-paste.ini.controller.RedHat
new file mode 120000
index 0000000..341baca
--- /dev/null
+++ b/cinder/files/juno/api-paste.ini.controller.RedHat
@@ -0,0 +1 @@
+api-paste.ini.controller.Debian
\ No newline at end of file
diff --git a/cinder/files/juno/api-paste.ini.volume.Debian b/cinder/files/juno/api-paste.ini.volume.Debian
new file mode 100644
index 0000000..83136cc
--- /dev/null
+++ b/cinder/files/juno/api-paste.ini.volume.Debian
@@ -0,0 +1,74 @@
+#############
+# OpenStack #
+#############
+
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+
+[filter:request_id]
+paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+service_port=5000
+service_host={{ pillar.cinder.volume.identity.host }}
+service_protocol=http
+auth_host = {{ pillar.cinder.volume.identity.host }}
+auth_port = {{ pillar.cinder.volume.identity.port }}
+auth_protocol = http
+admin_tenant_name = {{ pillar.cinder.volume.identity.tenant }}
+admin_user = {{ pillar.cinder.volume.identity.user }}
+admin_password = {{ pillar.cinder.volume.identity.password }}
+auth_uri=http://{{ pillar.cinder.volume.identity.host }}:5000/
+
+[filter:ratelimit]
+limits=( POST, '*/servers', ^/servers, 100000, DAY );( PUT, '*', .*, 100000, MINUTE );( GET, '*changes-since*', .*changes-since.*, 100000, MINUTE );( DELETE, '*', .*, 100000, MINUTE );( POST, '*', .*, 100000, MINUTE )
+paste.filter_factory=cinder.api.v1.limits:RateLimitingMiddleware.factory
diff --git a/cinder/files/juno/api-paste.ini.volume.RedHat b/cinder/files/juno/api-paste.ini.volume.RedHat
new file mode 120000
index 0000000..c5204ec
--- /dev/null
+++ b/cinder/files/juno/api-paste.ini.volume.RedHat
@@ -0,0 +1 @@
+api-paste.ini.volume.Debian
\ No newline at end of file
diff --git a/cinder/files/juno/cinder.conf.controller.Debian b/cinder/files/juno/cinder.conf.controller.Debian
new file mode 100644
index 0000000..fa62135
--- /dev/null
+++ b/cinder/files/juno/cinder.conf.controller.Debian
@@ -0,0 +1,361 @@
+{%- from "cinder/map.jinja" import controller with context %}
+
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+#volume_group = cinder
+
+verbose = True
+
+auth_strategy = keystone
+
+state_path = /var/lib/cinder
+lock_path = /var/lock/cinder
+
+use_syslog=False
+
+glance_num_retries=0
+debug=False
+
+#glance_api_ssl_compression=False
+#glance_api_insecure=False
+
+osapi_volume_listen={{ controller.osapi.host }}
+
+glance_host={{ controller.glance.host }}
+glance_port={{ controller.glance.port }}
+glance_api_version=2
+
+volume_backend_name=DEFAULT
+
+{%- if controller.backend is defined %}
+
+default_volume_type={{ controller.default_volume_type }}
+
+enabled_backends={% for backend_name, backend in controller.get('backend', {}).iteritems() %}{{ backend_name }}{% if not loop.last %},{% endif %}{% endfor %}
+
+{%- else %}
+
+default_volume_type={% for type in controller.get('types', []) %}{% if loop.first %}{{ type.name }}{% endif %}{% endfor %}
+
+enabled_backends={% for type in controller.get('types', []) %}{{ type.backend }}{% if not loop.last %},{% endif %}{% endfor %}{% if controller.storage.engine == "openvstorage" %}{% for vpool in pillar.openvstorage.server.get('vpools', []) %}{{ vpool }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
+
+{%- endif %}
+
+#RPC response timeout recommended by Hitachi
+rpc_response_timeout=3600
+
+#Rabbit
+kombu_reconnect_delay=5.0
+rpc_backend=cinder.openstack.common.rpc.impl_kombu
+control_exchange=cinder
+rabbit_host = {{ controller.message_queue.host }}
+rabbit_port = {{ controller.message_queue.port }}
+rabbit_userid = {{ controller.message_queue.user }}
+rabbit_password = {{ controller.message_queue.password }}
+rabbit_virtual_host = {{ controller.message_queue.virtual_host }}
+rabbit_ha_queues={{ controller.message_queue.ha_queues }}
+
+
+volume_clear={{ controller.wipe_method }}
+
+{%- if controller.notification %}
+control_exchange = cinder
+notification_driver = messagingv2
+{%- endif %}
+
+
+volume_name_template = volume-%s
+
+#volume_group = vg_cinder_volume
+
+volumes_dir = /var/lib/cinder/volumes
+log_dir=/var/log/cinder
+
+# Use syslog for logging. (boolean value)
+#use_syslog=false
+
+use_syslog=false
+verbose=True
+lock_path=/var/lock/cinder
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-cinder
+admin_password={{ controller.identity.password }}
+auth_port={{ controller.identity.port }}
+auth_host={{ controller.identity.host }}
+admin_tenant_name={{ controller.identity.tenant }}
+auth_protocol=http
+admin_user=cinder
+signing_dirname=/tmp/keystone-signing-cinder
+auth_uri=http://{{ controller.identity.host }}:{{ controller.identity.port }}
+
+[database]
+idle_timeout=3600
+max_pool_size=30
+max_retries=-1
+max_overflow=40
+connection = {{ controller.database.engine }}://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}
+
+{# new way #}
+
+{%- if controller.backend is defined %}
+
+{%- for backend_name, backend in controller.get('backend', {}).iteritems() %}
+
+{%- set backend_fragment = "cinder/files/backend/_" + backend.engine + ".conf" %}
+{%- include backend_fragment %}
+
+{%- endfor %}
+
+{%- else %}
+
+{# old way #}
+
+{%- if controller.storage.engine == "storwize" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.name }}]
+volume_driver = cinder.volume.drivers.storwize_svc.StorwizeSVCDriver
+volume_backend_name={{ type.name }}
+san_ip={{ controller.storage.host }}
+san_ssh_port={{ controller.storage.port }}
+san_login={{ controller.storage.user }}
+san_password={{ controller.storage.password }}
+
+storwize_svc_volpool_name={{ type.pool }}
+#storwize_svc_connection_protocol=iSCSI
+storwize_svc_connection_protocol={{ controller.storage.connection }}
+#storwize_svc_iscsi_chap_enabled=true
+storwize_svc_multihost_enabled={{ controller.storage.multihost }}
+storwize_svc_multipath_enabled={{ controller.storage.multipath }}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if controller.storage.engine == "hitachi_vsp" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.name }}]
+volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver
+volume_backend_name={{ type.name }}
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+hitachi_serial_number=86644
+#hitachi_serial_number=355316
+
+# Name of an array unit (string value)
+#hitachi_unit_name=<None>
+#hitachi_unit_name=fiat
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+hitachi_pool_id=4
+
+# Thin pool ID of storage system (integer value)
+hitachi_thin_pool_id=14
+
+# Range of logical device of storage system (string value)
+#hitachi_ldev_range=<None>
+#hitachi_ldev_range=00:05:00-00:05:FF
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+hitachi_default_copy_method=THIN
+
+# Copy speed of storage system (integer value)
+#hitachi_copy_speed=3
+
+# Interval to check copy (integer value)
+#hitachi_copy_check_interval=3
+
+# Interval to check copy asynchronously (integer value)
+#hitachi_async_copy_check_interval=10
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+hitachi_target_ports=CL1-E,CL2-E,CL3-B,CL4-D
+
+# Range of group number (string value)
+#hitachi_group_range=
+#hitachi_group_range=
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+hitachi_group_request=True
+#hitachi_group_request=false
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_fc
+#
+# Request for FC Zone creating HostGroup (boolean value)
+hitachi_zoning_request=true
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_horcm
+#
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+hitachi_horcm_numbers=0,1
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+#hitachi_horcm_user=openstack
+hitachi_horcm_user=root
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+#hitachi_horcm_password=avg2014
+hitachi_horcm_password=X3tT35va
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+hitachi_horcm_add_conf=true
+
+#hitachi multipath advice
+use_multipath_for_image_xfer=false
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if controller.storage.engine == "ceph" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.name }}]
+
+volume_driver = cinder.volume.drivers.rbd.RBDDriver
+#
+# Options defined in cinder.volume.drivers.rbd
+#
+# The RADOS pool where rbd volumes are stored (string value)
+#rbd_pool=volumes
+rbd_pool={{ type.pool }}
+
+# The RADOS client name for accessing rbd volumes - only set
+# when using cephx authentication (string value)
+#rbd_user=cinder
+rbd_user={{ controller.storage.user }}
+
+# Path to the ceph configuration file (string value)
+#rbd_ceph_conf=
+rbd_ceph_conf=/etc/ceph/ceph.conf
+
+# Flatten volumes created from snapshots to remove dependency
+# from volume to snapshot (boolean value)
+#rbd_flatten_volume_from_snapshot=false
+
+# The libvirt uuid of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=da74ccb7-aa59-1721-a172-0006b1aa4e3e
+rbd_secret_uuid={{ controller.storage.secret_uuid }}
+
+# Directory where temporary image files are stored when the
+# volume driver does not write them directly to the volume.
+# (string value)
+#volume_tmp_dir=<None>
+
+# Maximum number of nested volume clones that are taken before
+# a flatten occurs. Set to 0 to disable cloning. (integer
+# value)
+#rbd_max_clone_depth=5
+
+# Volumes will be chunked into objects of this size (in
+# megabytes). (integer value)
+#rbd_store_chunk_size=4
+
+# Timeout value (in seconds) used when connecting to ceph
+# cluster. If value < 0, no timeout is set and default
+# librados value is used. (integer value)
+#rados_connect_timeout=-1
+
+{%- endfor %}
+
+{%- endif %}
+
+
+{%- if controller.storage.engine == "hp3par" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.name }}]
+
+hp3par_api_url={{ controller.storage.url }}
+
+# 3PAR Super user username
+hp3par_username={{ controller.storage.user }}
+
+# 3PAR Super user password
+hp3par_password={{ controller.storage.password }}
+
+# 3PAR CPG to use for volume creation
+hp3par_cpg={{ controller.storage.cpg }}
+
+# IP address of SAN controller for SSH access to the array
+san_ip={{ controller.storage.host }}
+
+# Username for SAN controller for SSH access to the array
+san_login={{ controller.storage.login }}
+
+# Password for SAN controller for SSH access to the array
+san_password={{ controller.storage.password }}
+
+# FIBRE CHANNEL(uncomment the next line to enable the FC driver)
+volume_driver=cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver
+
+# iSCSI (uncomment the next line to enable the iSCSI driver and
+# hp3par_iscsi_ips or iscsi_ip_address)
+#volume_driver=cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver
+
+# iSCSI multiple port configuration
+# hp3par_iscsi_ips=10.10.220.253:3261,10.10.222.234
+#hp3par_iscsi_ips=10.10.103.151
+
+# Still available for single port iSCSI configuration
+#iscsi_ip_address=10.10.103.151
+
+## OPTIONAL SETTINGS
+# Enable HTTP debugging to 3PAR
+# hp3par_debug=True
+hp3par_debug={{ controller.storage.debug }}
+
+# Enable CHAP authentication for iSCSI connections.
+hp3par_iscsi_chap_enabled=false
+
+# The CPG to use for Snapshots for volumes. If empty hp3par_cpg will be used.
+hp3par_snap_cpg={{ controller.storage.snapcpg }}
+
+# Time in hours to retain a snapshot. You can't delete it before this expires.
+hp3par_snapshot_retention=2
+
+# Time in hours when a snapshot expires and is deleted. This must be larger than retention.
+hp3par_snapshot_expiration=4
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if controller.storage.engine == "fujitsu" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.name }}]
+volume_backend_name={{ type.name }}
+volume_driver=cinder.volume.drivers.fujitsu.fujitsu_eternus_dx_fc.FJDXFCDriver
+cinder_eternus_config_file=/etc/cinder/cinder_fujitsu_eternus_dx_{{ type.name }}.xml
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- endif %}
\ No newline at end of file
diff --git a/cinder/files/juno/cinder.conf.controller.RedHat b/cinder/files/juno/cinder.conf.controller.RedHat
new file mode 120000
index 0000000..563de88
--- /dev/null
+++ b/cinder/files/juno/cinder.conf.controller.RedHat
@@ -0,0 +1 @@
+cinder.conf.controller.Debian
\ No newline at end of file
diff --git a/cinder/files/juno/cinder.conf.volume.Debian b/cinder/files/juno/cinder.conf.volume.Debian
new file mode 100644
index 0000000..383a8f5
--- /dev/null
+++ b/cinder/files/juno/cinder.conf.volume.Debian
@@ -0,0 +1,438 @@
+{%- from "cinder/map.jinja" import volume with context %}
+
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+#volume_group = cinder
+
+verbose = True
+
+auth_strategy = keystone
+
+state_path = /var/lib/cinder
+lock_path = /var/lock/cinder
+
+use_syslog=False
+
+glance_num_retries=0
+debug=False
+
+#glance_api_ssl_compression=False
+#glance_api_insecure=False
+
+osapi_volume_listen={{ volume.osapi.host }}
+
+glance_host={{ volume.glance.host }}
+glance_port={{ volume.glance.port }}
+glance_api_version=2
+
+volume_backend_name=DEFAULT
+
+{%- if volume.backend is defined %}
+
+default_volume_type={{ volume.default_volume_type }}
+
+enabled_backends={% for backend_name, backend in volume.get('backend', {}).iteritems() %}{{ backend_name }}{% if not loop.last %},{% endif %}{% endfor %}
+
+{%- else %}
+
+default_volume_type={% for type in volume.get('types', []) %}{% if loop.first %}{{ type.name }}{% endif %}{% endfor %}
+
+enabled_backends={% for type in volume.get('types', []) %}{{ type.backend }}{% if not loop.last %},{% endif %}{% endfor %}{% if volume.storage.engine == "openvstorage" %}{% for vpool in pillar.openvstorage.server.get('vpools', []) %}{{ vpool }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
+
+{%- endif %}
+
+#RPC response timeout recommended by Hitachi
+rpc_response_timeout=3600
+
+#Rabbit
+kombu_reconnect_delay=5.0
+rpc_backend=cinder.openstack.common.rpc.impl_kombu
+control_exchange=cinder
+rabbit_host = {{ volume.message_queue.host }}
+rabbit_port = {{ volume.message_queue.port }}
+rabbit_userid = {{ volume.message_queue.user }}
+rabbit_password = {{ volume.message_queue.password }}
+rabbit_virtual_host = {{ volume.message_queue.virtual_host }}
+rabbit_ha_queues={{ volume.message_queue.ha_queues }}
+
+
+volume_clear={{ volume.wipe_method }}
+
+{%- if volume.notification %}
+control_exchange = cinder
+
+{%- if volume.notification.driver is defined %}
+notification_driver = {{ volume.notification.driver }}
+{%- else %}
+notification_driver = messagingv2
+{%- endif %}
+
+{%- if volume.notification.topics is defined %}
+notification_topics = {{ volume.notification.topics }}
+{%- endif %}
+
+{%- endif %}
+
+volume_name_template = volume-%s
+
+#volume_group = vg_cinder_volume
+
+volumes_dir = /var/lib/cinder/volumes
+log_dir=/var/log/cinder
+
+# Use syslog for logging. (boolean value)
+#use_syslog=false
+
+use_syslog=false
+verbose=True
+lock_path=/var/lock/cinder
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-cinder
+admin_password={{ volume.identity.password }}
+auth_port={{ volume.identity.port }}
+auth_host={{ volume.identity.host }}
+admin_tenant_name={{ volume.identity.tenant }}
+auth_protocol=http
+admin_user=cinder
+signing_dirname=/tmp/keystone-signing-cinder
+auth_uri=http://{{ volume.identity.host }}:{{ volume.identity.port }}
+
+[database]
+idle_timeout=3600
+max_pool_size=30
+max_retries=-1
+max_overflow=40
+connection = {{ volume.database.engine }}://{{ volume.database.user }}:{{ volume.database.password }}@{{ volume.database.host }}/{{ volume.database.name }}
+
+{# new way #}
+
+{%- if volume.backend is defined %}
+
+{%- for backend_name, backend in volume.get('backend', {}).iteritems() %}
+
+{%- set backend_fragment = "cinder/files/backend/_" + backend.engine + ".conf" %}
+{%- include backend_fragment %}
+
+{%- endfor %}
+
+{%- else %}
+
+{# old way #}
+
+{%- if volume.storage.engine == "storwize" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.name }}]
+volume_driver = cinder.volume.drivers.storwize_svc.StorwizeSVCDriver
+volume_backend_name={{ type.name }}
+san_ip={{ volume.storage.host }}
+san_ssh_port={{ volume.storage.port }}
+san_login={{ volume.storage.user }}
+san_password={{ volume.storage.password }}
+
+storwize_svc_volpool_name={{ type.pool }}
+#storwize_svc_connection_protocol=iSCSI
+storwize_svc_connection_protocol={{ volume.storage.connection }}
+#storwize_svc_iscsi_chap_enabled=true
+storwize_svc_multihost_enabled={{ volume.storage.multihost }}
+storwize_svc_multipath_enabled={{ volume.storage.multipath }}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == "hitachi_vsp" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.name }}]
+
+{%- if volume.storage.version == "1.0" %}
+
+volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver
+volume_backend_name={{ type.name }}
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+hitachi_serial_number={{ volume.storage.storage_id }}
+#hitachi_serial_number=355316
+
+# Name of an array unit (string value)
+#hitachi_unit_name=<None>
+#hitachi_unit_name=fiat
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+hitachi_pool_id={{ volume.storage.pool_id }}
+
+# Thin pool ID of storage system (integer value)
+hitachi_thin_pool_id={{ volume.storage.thin_pool_id }}
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+hitachi_default_copy_method=THIN
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+hitachi_target_ports={{ volume.storage.compute_target_ports }}
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+hitachi_group_request=True
+#hitachi_group_request=false
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_fc
+#
+# Request for FC Zone creating HostGroup (boolean value)
+hitachi_zoning_request=true
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_horcm
+#
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+hitachi_horcm_numbers=0,1
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+#hitachi_horcm_user=openstack
+hitachi_horcm_user={{ volume.storage.user }}
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+#hitachi_horcm_password=avg2014
+hitachi_horcm_password={{ volume.storage.password }}
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+hitachi_horcm_add_conf=true
+
+#hitachi multipath advice
+use_multipath_for_image_xfer=false
+
+{%- endif %}
+
+{%- if volume.storage.version == "1.3" %}
+
+volume_driver = cinder.volume.drivers.hitachi.hbsd.hbsd_fc.HBSDFCDriver
+volume_backend_name=hitachi_vsp
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+#hitachi_serial_number=86644
+hitachi_storage_id={{ volume.storage.storage_id }}
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+hitachi_pool={{ volume.storage.pool_id }}
+
+# Thin pool ID of storage system (integer value)
+hitachi_thin_pool={{ volume.storage.thin_pool_id }}
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+hitachi_default_copy_method=THIN
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+hitachi_target_ports={{ volume.storage.target_ports }}
+
+hitachi_compute_target_ports={{ volume.storage.compute_target_ports }}
+
+# Range of group number (string value)
+#hitachi_group_range=
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+hitachi_group_request=True
+#hitachi_group_request=false
+
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+hitachi_horcm_numbers=0,1
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+#hitachi_horcm_user=openstack
+hitachi_horcm_user={{ volume.storage.user }}
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+#hitachi_horcm_password=avg2014
+hitachi_horcm_password={{ volume.storage.password }}
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+hitachi_horcm_add_conf=true
+
+# Multipath for image transfer disabled per Hitachi's recommendation
+use_multipath_for_image_xfer=false
+
+hitachi_storage_cli=HORCM
+
+
+{%- endif %}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == "ceph" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.name }}]
+
+volume_driver = cinder.volume.drivers.rbd.RBDDriver
+#
+# Options defined in cinder.volume.drivers.rbd
+#
+# The RADOS pool where rbd volumes are stored (string value)
+#rbd_pool=volumes
+rbd_pool={{ type.pool }}
+
+# The RADOS client name for accessing rbd volumes - only set
+# when using cephx authentication (string value)
+#rbd_user=cinder
+rbd_user={{ volume.storage.user }}
+
+# Path to the ceph configuration file (string value)
+#rbd_ceph_conf=
+rbd_ceph_conf=/etc/ceph/ceph.conf
+
+# Flatten volumes created from snapshots to remove dependency
+# from volume to snapshot (boolean value)
+#rbd_flatten_volume_from_snapshot=false
+
+# The libvirt uuid of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=da74ccb7-aa59-1721-a172-0006b1aa4e3e
+rbd_secret_uuid={{ volume.storage.secret_uuid }}
+
+# Directory where temporary image files are stored when the
+# volume driver does not write them directly to the volume.
+# (string value)
+#volume_tmp_dir=<None>
+
+# Maximum number of nested volume clones that are taken before
+# a flatten occurs. Set to 0 to disable cloning. (integer
+# value)
+#rbd_max_clone_depth=5
+
+# Volumes will be chunked into objects of this size (in
+# megabytes). (integer value)
+#rbd_store_chunk_size=4
+
+# Timeout value (in seconds) used when connecting to ceph
+# cluster. If value < 0, no timeout is set and default
+# librados value is used. (integer value)
+#rados_connect_timeout=-1
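+# Note: rbd_secret_uuid must match the libvirt secret that holds the
+# cephx key for rbd_user on the hosts attaching these volumes.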
+
+{%- endfor %}
+
+{%- endif %}
+
+
+{%- if volume.storage.engine == "hp3par" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.name }}]
+
+hp3par_api_url={{ volume.storage.url }}
+
+# 3PAR Super user username
+hp3par_username={{ volume.storage.user }}
+
+# 3PAR Super user password
+hp3par_password={{ volume.storage.password }}
+
+# 3PAR CPG to use for volume creation
+hp3par_cpg={{ volume.storage.cpg }}
+
+# IP address of the SAN controller for SSH access to the array
+san_ip={{ volume.storage.host }}
+
+# Username for the SAN controller for SSH access to the array
+san_login={{ volume.storage.login }}
+
+# Password for the SAN controller for SSH access to the array
+san_password={{ volume.storage.password }}
+
+# FIBRE CHANNEL (uncomment the next line to enable the FC driver)
+volume_driver=cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver
+
+# iSCSI (uncomment the next line to enable the iSCSI driver and
+# hp3par_iscsi_ips or iscsi_ip_address)
+#volume_driver=cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver
+
+# iSCSI multiple port configuration
+# hp3par_iscsi_ips=10.10.220.253:3261,10.10.222.234
+#hp3par_iscsi_ips=10.10.103.151
+
+# Still available for single port iSCSI configuration
+#iscsi_ip_address=10.10.103.151
+
+## OPTIONAL SETTINGS
+# Enable HTTP debugging to 3PAR
+# hp3par_debug=True
+hp3par_debug={{ volume.storage.debug }}
+
+# Enable CHAP authentication for iSCSI connections.
+hp3par_iscsi_chap_enabled=false
+
+# The CPG to use for Snapshots for volumes. If empty hp3par_cpg will be used.
+hp3par_snap_cpg={{ volume.storage.snapcpg }}
+
+# Time in hours to retain a snapshot. You can't delete it before this expires.
+hp3par_snapshot_retention=2
+
+# Time in hours when a snapshot expires and is deleted. This must be larger than retention.
+hp3par_snapshot_expiration=4
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == "openvstorage" %}
+
+{%- for vpool in pillar.openvstorage.server.get('vpools', []) %}
+
+[{{ vpool }}]
+volume_driver = cinder.volume.drivers.openvstorage.OVSVolumeDriver
+volume_backend_name={{ vpool }}
+vpool_name={{ vpool }}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == "fujitsu" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.name }}]
+volume_backend_name={{ type.name }}
+volume_driver=cinder.volume.drivers.fujitsu.fujitsu_eternus_dx_fc.FJDXFCDriver
+cinder_eternus_config_file=/etc/cinder/cinder_fujitsu_eternus_dx_{{ type.name }}.xml
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- endif %}
\ No newline at end of file
diff --git a/cinder/files/juno/cinder.conf.volume.RedHat b/cinder/files/juno/cinder.conf.volume.RedHat
new file mode 120000
index 0000000..df997ca
--- /dev/null
+++ b/cinder/files/juno/cinder.conf.volume.RedHat
@@ -0,0 +1 @@
+cinder.conf.volume.Debian
\ No newline at end of file
diff --git a/cinder/files/juno/cinder_fujitsu_eternus_dx.xml b/cinder/files/juno/cinder_fujitsu_eternus_dx.xml
new file mode 100644
index 0000000..afab2d5
--- /dev/null
+++ b/cinder/files/juno/cinder_fujitsu_eternus_dx.xml
@@ -0,0 +1,22 @@
+{%- if volume.storage.engine is defined %}
+{%- from "cinder/map.jinja" import volume with context -%}
+<?xml version='1.0' encoding='UTF-8'?>
+<FUJITSU>
+<EternusIP>{{ volume.storage.host }}</EternusIP>
+<EternusPort>{{ volume.storage.port }}</EternusPort>
+<EternusUser>{{ volume.storage.user }}</EternusUser>
+<EternusPassword>{{ volume.storage.password }}</EternusPassword>
+<EternusISCSIIP></EternusISCSIIP>
+<EternusPool>{{ volume_type_name }}</EternusPool>
+</FUJITSU>
+{%- else %}
+<?xml version='1.0' encoding='UTF-8'?>
+<FUJITSU>
+<EternusIP>{{ volume.backend.host }}</EternusIP>
+<EternusPort>{{ volume.backend.port }}</EternusPort>
+<EternusUser>{{ volume.backend.user }}</EternusUser>
+<EternusPassword>{{ volume.backend.password }}</EternusPassword>
+<EternusISCSIIP></EternusISCSIIP>
+<EternusPool>{{ backend.pool }}</EternusPool>
+</FUJITSU>
+{%- endif %}
\ No newline at end of file
diff --git a/cinder/files/kilo/api-paste.ini.controller.Debian b/cinder/files/kilo/api-paste.ini.controller.Debian
new file mode 100644
index 0000000..6ffb02e
--- /dev/null
+++ b/cinder/files/kilo/api-paste.ini.controller.Debian
@@ -0,0 +1,70 @@
+#############
+# OpenStack #
+#############
+{%- from "cinder/map.jinja" import controller with context %}
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
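+# cinder.api.middleware.auth:pipeline_factory picks one of the pipelines
+# named above according to auth_strategy in cinder.conf (keystone in
+# this formula), so requests normally traverse authtoken and
+# keystonecontext before reaching the API router.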
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware.request_id:RequestId.factory
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+service_port=5000
+service_host={{ controller.identity.host }}
+service_protocol=http
+auth_host = {{ controller.identity.host }}
+auth_port = {{ controller.identity.port }}
+auth_protocol = http
+admin_tenant_name = {{ controller.identity.tenant }}
+admin_user = {{ controller.identity.user }}
+admin_password = {{ controller.identity.password }}
+auth_uri=http://{{ controller.identity.host }}:5000/{% if controller.identity.get('version', 2) == 3 %}v3{% endif %}
diff --git a/cinder/files/kilo/api-paste.ini.controller.RedHat b/cinder/files/kilo/api-paste.ini.controller.RedHat
new file mode 120000
index 0000000..341baca
--- /dev/null
+++ b/cinder/files/kilo/api-paste.ini.controller.RedHat
@@ -0,0 +1 @@
+api-paste.ini.controller.Debian
\ No newline at end of file
diff --git a/cinder/files/kilo/api-paste.ini.volume.Debian b/cinder/files/kilo/api-paste.ini.volume.Debian
new file mode 100644
index 0000000..0d7ee9f
--- /dev/null
+++ b/cinder/files/kilo/api-paste.ini.volume.Debian
@@ -0,0 +1,70 @@
+#############
+# OpenStack #
+#############
+{%- from "cinder/map.jinja" import volume with context %}
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware.request_id:RequestId.factory
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+service_port=5000
+service_host={{ volume.identity.host }}
+service_protocol=http
+auth_host = {{ volume.identity.host }}
+auth_port = {{ volume.identity.port }}
+auth_protocol = http
+admin_tenant_name = {{ volume.identity.tenant }}
+admin_user = {{ volume.identity.user }}
+admin_password = {{ volume.identity.password }}
+auth_uri=http://{{ volume.identity.host }}:5000/{% if volume.identity.get('version', 2) == 3 %}v3{% endif %}
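+# Rendered result: with identity version 3 the auth_uri gains a /v3
+# suffix (e.g. http://10.0.0.10:5000/v3, host illustrative); with the
+# default version 2 the bare port 5000 endpoint is used.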
diff --git a/cinder/files/kilo/api-paste.ini.volume.RedHat b/cinder/files/kilo/api-paste.ini.volume.RedHat
new file mode 120000
index 0000000..c5204ec
--- /dev/null
+++ b/cinder/files/kilo/api-paste.ini.volume.RedHat
@@ -0,0 +1 @@
+api-paste.ini.volume.Debian
\ No newline at end of file
diff --git a/cinder/files/kilo/cinder.conf.controller.Debian b/cinder/files/kilo/cinder.conf.controller.Debian
new file mode 100644
index 0000000..5f723fe
--- /dev/null
+++ b/cinder/files/kilo/cinder.conf.controller.Debian
@@ -0,0 +1,409 @@
+{%- from "cinder/map.jinja" import controller with context %}
+
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+#volume_group = cinder
+
+verbose = True
+
+auth_strategy = keystone
+
+state_path = /var/lib/cinder
+lock_path = /var/lock/cinder
+
+use_syslog=False
+
+glance_num_retries=0
+debug=False
+
+os_region_name={{ controller.identity.region }}
+
+#glance_api_ssl_compression=False
+#glance_api_insecure=False
+
+osapi_volume_listen={{ controller.osapi.host }}
+
+glance_host={{ controller.glance.host }}
+glance_port={{ controller.glance.port }}
+glance_api_version=2
+
+volume_backend_name=DEFAULT
+
+{%- if controller.backend is defined %}
+
+default_volume_type={{ controller.default_volume_type }}
+
+enabled_backends={% for backend_name, backend in controller.get('backend', {}).iteritems() %}{{ backend_name }}{% if not loop.last %},{% endif %}{% endfor %}
+
+{%- else %}
+
+default_volume_type={% for type in controller.get('types', []) %}{% if loop.first %}{{ type.name }}{% endif %}{% endfor %}
+
+enabled_backends={% for type in controller.get('types', []) %}{{ type.backend }}{% if not loop.last %},{% endif %}{% endfor %}{% if controller.storage.engine == "openvstorage" %}{% for vpool in pillar.openvstorage.server.get('vpools', []) %}{{ vpool }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
+
+{%- endif %}
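+{#
+  Example rendering of the legacy branch, assuming two types with
+  name/backend pairs (volumes-fast, fast) and (volumes-slow, slow):
+
+    default_volume_type=volumes-fast
+    enabled_backends=fast,slow
+#}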
+
+#RPC response timeout recommended by Hitachi
+rpc_response_timeout=3600
+
+#Rabbit
+rpc_backend=cinder.openstack.common.rpc.impl_kombu
+control_exchange=cinder
+
+
+volume_clear={{ controller.wipe_method }}
+
+{%- if controller.notification %}
+control_exchange = cinder
+notification_driver = messagingv2
+{%- endif %}
+
+
+#volume_group = vg_cinder_volume
+
+volumes_dir = /var/lib/cinder/volumes
+log_dir=/var/log/cinder
+
+[oslo_concurrency]
+
+lock_path=/var/lock/cinder
+
+[oslo_messaging_rabbit]
+rabbit_host = {{ controller.message_queue.host }}
+rabbit_port = {{ controller.message_queue.port }}
+rabbit_userid = {{ controller.message_queue.user }}
+rabbit_password = {{ controller.message_queue.password }}
+rabbit_virtual_host = {{ controller.message_queue.virtual_host }}
+rabbit_ha_queues={{ controller.message_queue.ha_queues }}
+kombu_reconnect_delay=5.0
+
+{%- if controller.identity.get('version', 2) == 2 %}
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-cinder
+admin_password={{ controller.identity.password }}
+auth_port={{ controller.identity.port }}
+auth_host={{ controller.identity.host }}
+admin_tenant_name={{ controller.identity.tenant }}
+auth_protocol=http
+admin_user=cinder
+signing_dirname=/tmp/keystone-signing-cinder
+auth_uri=http://{{ controller.identity.host }}:{{ controller.identity.port }}
+
+{%- else %}
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-cinder
+signing_dirname=/tmp/keystone-signing-cinder
+identity_uri = http://{{ controller.identity.host }}:35357/v3
+revocation_cache_time = 10
+auth_section = generic_password
+
+[generic_password]
+auth_plugin = password
+user_domain_id = {{ controller.identity.get('domain', 'default') }}
+project_domain_id = {{ controller.identity.get('domain', 'default') }}
+project_name = {{ controller.identity.tenant }}
+username = {{ controller.identity.user }}
+password = {{ controller.identity.password }}
+auth_uri=http://{{ controller.identity.host }}:5000/v3
+auth_url=http://{{ controller.identity.host }}:35357/v3
+
+{%- endif %}
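+{#
+  With identity API v2 all authtoken settings stay inline; with v3 the
+  [keystone_authtoken] section delegates to [generic_password] via
+  auth_section and authenticates with the password plugin against the
+  :35357 admin endpoint. The version comes from the identity pillar and
+  defaults to 2.
+#}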
+
+[database]
+idle_timeout=3600
+max_pool_size=30
+max_retries=-1
+max_overflow=40
+connection = {{ controller.database.engine }}://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}
+
+{# new way #}
+
+{%- if controller.backend is defined %}
+
+{%- for backend_name, backend in controller.get('backend', {}).iteritems() %}
+
+{%- set backend_fragment = "cinder/files/backend/_" + backend.engine + ".conf" %}
+{%- include backend_fragment %}
+
+{%- endfor %}
+
+{%- else %}
+
+{# old way #}
+
+{%- if controller.storage.engine == "storwize" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.backend }}]
+volume_driver = cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver
+volume_backend_name={{ type.backend }}
+san_ip={{ controller.storage.host }}
+san_ssh_port={{ controller.storage.port }}
+san_login={{ controller.storage.user }}
+san_password={{ controller.storage.password }}
+
+storwize_svc_volpool_name={{ type.pool }}
+#storwize_svc_connection_protocol=iSCSI
+storwize_svc_connection_protocol={{ controller.storage.connection }}
+#storwize_svc_iscsi_chap_enabled=true
+storwize_svc_multihost_enabled={{ controller.storage.multihost }}
+storwize_svc_multipath_enabled={{ controller.storage.multipath }}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if controller.storage.engine == "hitachi_vsp" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.backend }}]
+volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver
+volume_backend_name={{ type.backend }}
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+hitachi_serial_number=86644
+#hitachi_serial_number=355316
+
+# Name of an array unit (string value)
+#hitachi_unit_name=<None>
+#hitachi_unit_name=fiat
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+hitachi_pool_id=4
+
+# Thin pool ID of storage system (integer value)
+hitachi_thin_pool_id=14
+
+# Range of logical device of storage system (string value)
+#hitachi_ldev_range=<None>
+#hitachi_ldev_range=00:05:00-00:05:FF
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+hitachi_default_copy_method=THIN
+
+# Copy speed of storage system (integer value)
+#hitachi_copy_speed=3
+
+# Interval to check copy (integer value)
+#hitachi_copy_check_interval=3
+
+# Interval to check copy asynchronously (integer value)
+#hitachi_async_copy_check_interval=10
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+hitachi_target_ports=CL1-E,CL2-E,CL3-B,CL4-D
+
+# Range of group number (string value)
+#hitachi_group_range=
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+hitachi_group_request=True
+#hitachi_group_request=false
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_fc
+#
+# Request for FC Zone creating HostGroup (boolean value)
+hitachi_zoning_request=true
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_horcm
+#
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+hitachi_horcm_numbers=0,1
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+#hitachi_horcm_user=openstack
+hitachi_horcm_user={{ controller.storage.user }}
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+#hitachi_horcm_password=avg2014
+hitachi_horcm_password={{ controller.storage.password }}
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+hitachi_horcm_add_conf=true
+
+# Multipath for image transfer disabled per Hitachi's recommendation
+use_multipath_for_image_xfer=false
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if controller.storage.engine == "ceph" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.backend }}]
+volume_backend_name={{ type.backend }}
+volume_driver = cinder.volume.drivers.rbd.RBDDriver
+#
+# Options defined in cinder.volume.drivers.rbd
+#
+# The RADOS pool where rbd volumes are stored (string value)
+#rbd_pool=volumes
+rbd_pool={{ type.pool }}
+
+# The RADOS client name for accessing rbd volumes - only set
+# when using cephx authentication (string value)
+#rbd_user=cinder
+rbd_user={{ controller.storage.user }}
+
+# Path to the ceph configuration file (string value)
+#rbd_ceph_conf=
+rbd_ceph_conf=/etc/ceph/ceph.conf
+
+# Flatten volumes created from snapshots to remove dependency
+# from volume to snapshot (boolean value)
+#rbd_flatten_volume_from_snapshot=false
+
+# The libvirt uuid of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=da74ccb7-aa59-1721-a172-0006b1aa4e3e
+rbd_secret_uuid={{ controller.storage.secret_uuid }}
+
+# Directory where temporary image files are stored when the
+# volume driver does not write them directly to the volume.
+# (string value)
+#volume_tmp_dir=<None>
+
+# Maximum number of nested volume clones that are taken before
+# a flatten occurs. Set to 0 to disable cloning. (integer
+# value)
+#rbd_max_clone_depth=5
+
+# Volumes will be chunked into objects of this size (in
+# megabytes). (integer value)
+#rbd_store_chunk_size=4
+
+# Timeout value (in seconds) used when connecting to ceph
+# cluster. If value < 0, no timeout is set and default
+# librados value is used. (integer value)
+#rados_connect_timeout=-1
+
+{%- endfor %}
+
+{%- endif %}
+
+
+{%- if controller.storage.engine == "hp3par" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.backend }}]
+
+hp3par_api_url={{ controller.storage.url }}
+
+# 3PAR Super user username
+hp3par_username={{ controller.storage.user }}
+
+# 3PAR Super user password
+hp3par_password={{ controller.storage.password }}
+
+# 3PAR CPG to use for volume creation
+hp3par_cpg={{ controller.storage.cpg }}
+
+# IP address of SAN controller for SSH access to the array
+san_ip={{ controller.storage.host }}
+
+# Username for SAN controller for SSH access to the array
+san_login={{ controller.storage.login }}
+
+# Password for SAN controller for SSH access to the array
+san_password={{ controller.storage.password }}
+
+# FIBRE CHANNEL (uncomment the next line to enable the FC driver)
+volume_driver=cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver
+
+# iSCSI (uncomment the next line to enable the iSCSI driver and
+# hp3par_iscsi_ips or iscsi_ip_address)
+#volume_driver=cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver
+
+# iSCSI multiple port configuration
+# hp3par_iscsi_ips=10.10.220.253:3261,10.10.222.234
+#hp3par_iscsi_ips=10.10.103.151
+
+# Still available for single port iSCSI configuration
+#iscsi_ip_address=10.10.103.151
+
+## OPTIONAL SETTINGS
+# Enable HTTP debugging to 3PAR
+# hp3par_debug=True
+hp3par_debug={{ controller.storage.debug }}
+
+# Enable CHAP authentication for iSCSI connections.
+hp3par_iscsi_chap_enabled=false
+
+# The CPG to use for Snapshots for volumes. If empty hp3par_cpg will be used.
+hp3par_snap_cpg={{ controller.storage.snapcpg }}
+
+# Time in hours to retain a snapshot. You can't delete it before this expires.
+hp3par_snapshot_retention=2
+
+# Time in hours when a snapshot expires and is deleted. This must be larger than retention.
+hp3par_snapshot_expiration=4
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if controller.storage.engine == "fujitsu" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.backend }}]
+volume_backend_name={{ type.backend }}
+volume_driver=cinder.volume.drivers.fujitsu.fujitsu_eternus_dx_fc.FJDXFCDriver
+cinder_eternus_config_file=/etc/cinder/cinder_fujitsu_eternus_dx_{{ type.backend }}.xml
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if controller.storage.engine == "gpfs" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.backend }}]
+volume_backend_name={{ type.backend }}
+volume_driver = cinder.volume.drivers.ibm.gpfs.GPFSDriver
+gpfs_mount_point_base={{ type.mount_point }}
+#gpfs_mount_point_base=/mnt/gpfs-openstack/cinder/gold
+gpfs_max_clone_depth=3
+gpfs_sparse_volumes=true
+gpfs_storage_pool=system
+
+{%- endfor %}
+
+{%- endif %}
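+{#
+  Hypothetical gpfs type entry for the loop above (mount_point is the
+  only per-type key consumed besides name/backend):
+
+    cinder:
+      controller:
+        types:
+        - name: gpfs-gold
+          backend: gpfs-gold
+          mount_point: /mnt/gpfs-openstack/cinder/gold
+#}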
+
+{%- endif %}
diff --git a/cinder/files/kilo/cinder.conf.controller.RedHat b/cinder/files/kilo/cinder.conf.controller.RedHat
new file mode 120000
index 0000000..563de88
--- /dev/null
+++ b/cinder/files/kilo/cinder.conf.controller.RedHat
@@ -0,0 +1 @@
+cinder.conf.controller.Debian
\ No newline at end of file
diff --git a/cinder/files/kilo/cinder.conf.volume.Debian b/cinder/files/kilo/cinder.conf.volume.Debian
new file mode 100644
index 0000000..5ec2e6b
--- /dev/null
+++ b/cinder/files/kilo/cinder.conf.volume.Debian
@@ -0,0 +1,486 @@
+{%- from "cinder/map.jinja" import volume with context %}
+
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+#volume_group = cinder
+
+verbose = True
+
+auth_strategy = keystone
+
+state_path = /var/lib/cinder
+lock_path = /var/lock/cinder
+
+use_syslog=False
+
+glance_num_retries=0
+debug=False
+
+os_region_name={{ volume.identity.region }}
+
+#glance_api_ssl_compression=False
+#glance_api_insecure=False
+
+osapi_volume_listen={{ volume.osapi.host }}
+
+glance_host={{ volume.glance.host }}
+glance_port={{ volume.glance.port }}
+glance_api_version=2
+
+volume_backend_name=DEFAULT
+
+{%- if volume.backend is defined %}
+
+default_volume_type={{ volume.default_volume_type }}
+
+enabled_backends={% for backend_name, backend in volume.get('backend', {}).iteritems() %}{{ backend_name }}{% if not loop.last %},{% endif %}{% endfor %}
+
+{%- else %}
+
+default_volume_type={% for type in volume.get('types', []) %}{% if loop.first %}{{ type.name }}{% endif %}{% endfor %}
+
+enabled_backends={% for type in volume.get('types', []) %}{{ type.backend }}{% if not loop.last %},{% endif %}{% endfor %}{% if volume.storage.engine == "openvstorage" %}{% for vpool in pillar.openvstorage.server.get('vpools', []) %}{{ vpool }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
+
+{%- endif %}
+
+#RPC response timeout recommended by Hitachi
+rpc_response_timeout=3600
+
+#Rabbit
+rpc_backend=cinder.openstack.common.rpc.impl_kombu
+control_exchange=cinder
+
+
+volume_clear={{ volume.wipe_method }}
+
+{%- if volume.notification %}
+control_exchange = cinder
+
+{%- if volume.notification.driver is defined %}
+notification_driver = {{ volume.notification.driver }}
+{%- else %}
+notification_driver = messagingv2
+{%- endif %}
+
+{%- if volume.notification.topics is defined %}
+notification_topics = {{ volume.notification.topics }}
+{%- endif %}
+
+{%- endif %}
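+{#
+  The notification pillar may override the driver and topics; a sketch
+  assuming only the keys read above:
+
+    cinder:
+      volume:
+        notification:
+          driver: messagingv2
+          topics: notifications,monitoring
+#}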
+
+#volume_group = vg_cinder_volume
+
+volumes_dir = /var/lib/cinder/volumes
+log_dir=/var/log/cinder
+
+[oslo_concurrency]
+
+lock_path=/var/lock/cinder
+
+[oslo_messaging_rabbit]
+
+rabbit_host = {{ volume.message_queue.host }}
+rabbit_port = {{ volume.message_queue.port }}
+rabbit_userid = {{ volume.message_queue.user }}
+rabbit_password = {{ volume.message_queue.password }}
+rabbit_virtual_host = {{ volume.message_queue.virtual_host }}
+rabbit_ha_queues={{ volume.message_queue.ha_queues }}
+kombu_reconnect_delay=5.0
+
+{%- if volume.identity.get('version', 2) == 2 %}
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-cinder
+admin_password={{ volume.identity.password }}
+auth_port={{ volume.identity.port }}
+auth_host={{ volume.identity.host }}
+admin_tenant_name={{ volume.identity.tenant }}
+auth_protocol=http
+admin_user=cinder
+signing_dirname=/tmp/keystone-signing-cinder
+auth_uri=http://{{ volume.identity.host }}:{{ volume.identity.port }}
+
+{%- else %}
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-cinder
+signing_dirname=/tmp/keystone-signing-cinder
+identity_uri = http://{{ volume.identity.host }}:35357/v3
+revocation_cache_time = 10
+auth_section = generic_password
+
+[generic_password]
+auth_plugin = password
+user_domain_id = {{ volume.identity.get('domain', 'default') }}
+project_domain_id = {{ volume.identity.get('domain', 'default') }}
+project_name = {{ volume.identity.tenant }}
+username = {{ volume.identity.user }}
+password = {{ volume.identity.password }}
+auth_uri=http://{{ volume.identity.host }}:5000/v3
+auth_url=http://{{ volume.identity.host }}:35357/v3
+
+{%- endif %}
+
+[database]
+idle_timeout=3600
+max_pool_size=30
+max_retries=-1
+max_overflow=40
+connection = {{ volume.database.engine }}://{{ volume.database.user }}:{{ volume.database.password }}@{{ volume.database.host }}/{{ volume.database.name }}
+
+{# new way #}
+
+{%- if volume.backend is defined %}
+
+{%- for backend_name, backend in volume.get('backend', {}).iteritems() %}
+
+{%- set backend_fragment = "cinder/files/backend/_" + backend.engine + ".conf" %}
+{%- include backend_fragment %}
+
+{%- endfor %}
+
+{%- else %}
+
+{# old way #}
+
+{%- if volume.storage.engine == "storwize" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.backend }}]
+volume_driver = cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver
+volume_backend_name={{ type.backend }}
+san_ip={{ volume.storage.host }}
+san_ssh_port={{ volume.storage.port }}
+san_login={{ volume.storage.user }}
+san_password={{ volume.storage.password }}
+
+storwize_svc_volpool_name={{ type.pool }}
+#storwize_svc_connection_protocol=iSCSI
+storwize_svc_connection_protocol={{ volume.storage.connection }}
+#storwize_svc_iscsi_chap_enabled=true
+storwize_svc_multihost_enabled={{ volume.storage.multihost }}
+storwize_svc_multipath_enabled={{ volume.storage.multipath }}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == "hitachi_vsp" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.backend }}]
+
+{%- if volume.storage.version == "1.0" %}
+
+volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver
+volume_backend_name={{ type.backend }}
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+hitachi_serial_number=86644
+#hitachi_serial_number=355316
+
+# Name of an array unit (string value)
+#hitachi_unit_name=<None>
+#hitachi_unit_name=fiat
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+hitachi_pool_id=4
+
+# Thin pool ID of storage system (integer value)
+hitachi_thin_pool_id=13
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+hitachi_default_copy_method=THIN
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+hitachi_target_ports=CL1-E,CL2-E,CL3-B,CL4-D
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+hitachi_group_request=True
+#hitachi_group_request=false
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_fc
+#
+# Request for FC Zone creating HostGroup (boolean value)
+hitachi_zoning_request=true
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_horcm
+#
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+hitachi_horcm_numbers=0,1
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+#hitachi_horcm_user=openstack
+hitachi_horcm_user={{ volume.storage.user }}
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+#hitachi_horcm_password=avg2014
+hitachi_horcm_password={{ volume.storage.password }}
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+hitachi_horcm_add_conf=true
+
+# Multipath for image transfer disabled per Hitachi's recommendation
+use_multipath_for_image_xfer=false
+
+{%- endif %}
+
+{%- if volume.storage.version == "1.3" %}
+
+volume_driver = cinder.volume.drivers.hitachi.hbsd.hbsd_fc.HBSDFCDriver
+volume_backend_name=hitachi_vsp
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+#hitachi_serial_number=86644
+hitachi_storage_id=86644
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+hitachi_pool=4
+
+# Thin pool ID of storage system (integer value)
+hitachi_thin_pool=13
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+hitachi_default_copy_method=THIN
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+hitachi_target_ports=CL3-B
+
+hitachi_compute_target_ports=CL1-E,CL2-E,CL3-B,CL4-D
+
+# Range of group number (string value)
+#hitachi_group_range=
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+hitachi_group_request=True
+#hitachi_group_request=false
+
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+hitachi_horcm_numbers=0,1
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+#hitachi_horcm_user=openstack
+hitachi_horcm_user={{ volume.storage.user }}
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+#hitachi_horcm_password=avg2014
+hitachi_horcm_password={{ volume.storage.password }}
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+hitachi_horcm_add_conf=true
+
+# Multipath for image transfer disabled per Hitachi's recommendation
+use_multipath_for_image_xfer=false
+
+hitachi_storage_cli=HORCM
+
+
+{%- endif %}
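+{#
+  Note the renames between the 1.0 and 1.3 branches above:
+  hitachi_serial_number -> hitachi_storage_id, hitachi_pool_id ->
+  hitachi_pool, hitachi_thin_pool_id -> hitachi_thin_pool; 1.3 also
+  pins hitachi_storage_cli=HORCM.
+#}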
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == "ceph" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.backend }}]
+volume_backend_name={{ type.backend }}
+volume_driver = cinder.volume.drivers.rbd.RBDDriver
+#
+# Options defined in cinder.volume.drivers.rbd
+#
+# The RADOS pool where rbd volumes are stored (string value)
+#rbd_pool=volumes
+rbd_pool={{ type.pool }}
+
+# The RADOS client name for accessing rbd volumes - only set
+# when using cephx authentication (string value)
+#rbd_user=cinder
+rbd_user={{ volume.storage.user }}
+
+# Path to the ceph configuration file (string value)
+#rbd_ceph_conf=
+rbd_ceph_conf=/etc/ceph/ceph.conf
+
+# Flatten volumes created from snapshots to remove dependency
+# from volume to snapshot (boolean value)
+#rbd_flatten_volume_from_snapshot=false
+
+# The libvirt uuid of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=da74ccb7-aa59-1721-a172-0006b1aa4e3e
+rbd_secret_uuid={{ volume.storage.secret_uuid }}
+
+# Directory where temporary image files are stored when the
+# volume driver does not write them directly to the volume.
+# (string value)
+#volume_tmp_dir=<None>
+
+# Maximum number of nested volume clones that are taken before
+# a flatten occurs. Set to 0 to disable cloning. (integer
+# value)
+#rbd_max_clone_depth=5
+
+# Volumes will be chunked into objects of this size (in
+# megabytes). (integer value)
+#rbd_store_chunk_size=4
+
+# Timeout value (in seconds) used when connecting to ceph
+# cluster. If value < 0, no timeout is set and default
+# librados value is used. (integer value)
+#rados_connect_timeout=-1
+
+{%- endfor %}
+
+{%- endif %}
+
+
+{%- if volume.storage.engine == "hp3par" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.backend }}]
+
+hp3par_api_url={{ volume.storage.url }}
+
+# 3PAR Super user username
+hp3par_username={{ volume.storage.user }}
+
+# 3PAR Super user password
+hp3par_password={{ volume.storage.password }}
+
+# 3PAR CPG to use for volume creation
+hp3par_cpg={{ volume.storage.cpg }}
+
+# IP address of the SAN controller for SSH access to the array
+san_ip={{ volume.storage.host }}
+
+# Username for the SAN controller for SSH access to the array
+san_login={{ volume.storage.login }}
+
+# Password for the SAN controller for SSH access to the array
+san_password={{ volume.storage.password }}
+
+# FIBRE CHANNEL (uncomment the next line to enable the FC driver)
+volume_driver=cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver
+
+# iSCSI (uncomment the next line to enable the iSCSI driver and
+# hp3par_iscsi_ips or iscsi_ip_address)
+#volume_driver=cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver
+
+# iSCSI multiple port configuration
+# hp3par_iscsi_ips=10.10.220.253:3261,10.10.222.234
+#hp3par_iscsi_ips=10.10.103.151
+
+# Still available for single port iSCSI configuration
+#iscsi_ip_address=10.10.103.151
+
+## OPTIONAL SETTINGS
+# Enable HTTP debugging to 3PAR
+# hp3par_debug=True
+hp3par_debug={{ volume.storage.debug }}
+
+# Enable CHAP authentication for iSCSI connections.
+hp3par_iscsi_chap_enabled=false
+
+# The CPG to use for Snapshots for volumes. If empty hp3par_cpg will be used.
+hp3par_snap_cpg={{ volume.storage.snapcpg }}
+
+# Time in hours to retain a snapshot. You can't delete it before this expires.
+hp3par_snapshot_retention=2
+
+# Time in hours when a snapshot expires and is deleted. This must be larger than retention.
+hp3par_snapshot_expiration=4
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == "openvstorage" %}
+
+{%- for vpool in pillar.openvstorage.server.get('vpools', []) %}
+
+[{{ vpool }}]
+volume_driver = cinder.volume.drivers.openvstorage.OVSVolumeDriver
+volume_backend_name={{ vpool }}
+vpool_name={{ vpool }}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == "fujitsu" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.backend }}]
+volume_backend_name={{ type.backend }}
+volume_driver=cinder.volume.drivers.fujitsu.fujitsu_eternus_dx_fc.FJDXFCDriver
+cinder_eternus_config_file=/etc/cinder/cinder_fujitsu_eternus_dx_{{ type.backend }}.xml
+
+{%- endfor %}
+
+{%- endif %}
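+{# Each Fujitsu type points at its own XML config,
+   /etc/cinder/cinder_fujitsu_eternus_dx_<backend>.xml, presumably
+   rendered from the cinder_fujitsu_eternus_dx.xml template added in
+   this change. #}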
+
+{%- if volume.storage.engine == "gpfs" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.backend }}]
+volume_backend_name={{ type.backend }}
+volume_driver = cinder.volume.drivers.ibm.gpfs.GPFSDriver
+gpfs_mount_point_base={{ type.mount_point }}
+#gpfs_mount_point_base=/mnt/gpfs-openstack/cinder/gold
+gpfs_max_clone_depth=3
+gpfs_sparse_volumes=true
+gpfs_storage_pool=system
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- endif %}
diff --git a/cinder/files/kilo/cinder.conf.volume.RedHat b/cinder/files/kilo/cinder.conf.volume.RedHat
new file mode 120000
index 0000000..df997ca
--- /dev/null
+++ b/cinder/files/kilo/cinder.conf.volume.RedHat
@@ -0,0 +1 @@
+cinder.conf.volume.Debian
\ No newline at end of file
diff --git a/cinder/files/kilo/cinder_fujitsu_eternus_dx.xml b/cinder/files/kilo/cinder_fujitsu_eternus_dx.xml
new file mode 100644
index 0000000..afab2d5
--- /dev/null
+++ b/cinder/files/kilo/cinder_fujitsu_eternus_dx.xml
@@ -0,0 +1,22 @@
+{%- if volume.storage.engine is defined %}
+{%- from "cinder/map.jinja" import volume with context -%}
+<?xml version='1.0' encoding='UTF-8'?>
+<FUJITSU>
+<EternusIP>{{ volume.storage.host }}</EternusIP>
+<EternusPort>{{ volume.storage.port }}</EternusPort>
+<EternusUser>{{ volume.storage.user }}</EternusUser>
+<EternusPassword>{{ volume.storage.password }}</EternusPassword>
+<EternusISCSIIP></EternusISCSIIP>
+<EternusPool>{{ volume_type_name }}</EternusPool>
+</FUJITSU>
+{%- else %}
+<?xml version='1.0' encoding='UTF-8'?>
+<FUJITSU>
+<EternusIP>{{ volume.backend.host }}</EternusIP>
+<EternusPort>{{ volume.backend.port }}</EternusPort>
+<EternusUser>{{ volume.backend.user }}</EternusUser>
+<EternusPassword>{{ volume.backend.password }}</EternusPassword>
+<EternusISCSIIP></EternusISCSIIP>
+<EternusPool>{{ backend.pool }}</EternusPool>
+</FUJITSU>
+{%- endif %}
\ No newline at end of file
diff --git a/cinder/files/liberty/api-paste.ini.controller.Debian b/cinder/files/liberty/api-paste.ini.controller.Debian
new file mode 100644
index 0000000..6ffb02e
--- /dev/null
+++ b/cinder/files/liberty/api-paste.ini.controller.Debian
@@ -0,0 +1,70 @@
+#############
+# OpenStack #
+#############
+{%- from "cinder/map.jinja" import controller with context %}
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware.request_id:RequestId.factory
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+service_port=5000
+service_host={{ controller.identity.host }}
+service_protocol=http
+auth_host = {{ controller.identity.host }}
+auth_port = {{ controller.identity.port }}
+auth_protocol = http
+admin_tenant_name = {{ controller.identity.tenant }}
+admin_user = {{ controller.identity.user }}
+admin_password = {{ controller.identity.password }}
+auth_uri=http://{{ controller.identity.host }}:5000/{% if controller.identity.get('version', 2) == 3 %}v3{% endif %}
diff --git a/cinder/files/liberty/api-paste.ini.controller.RedHat b/cinder/files/liberty/api-paste.ini.controller.RedHat
new file mode 120000
index 0000000..341baca
--- /dev/null
+++ b/cinder/files/liberty/api-paste.ini.controller.RedHat
@@ -0,0 +1 @@
+api-paste.ini.controller.Debian
\ No newline at end of file
diff --git a/cinder/files/liberty/api-paste.ini.volume.Debian b/cinder/files/liberty/api-paste.ini.volume.Debian
new file mode 100644
index 0000000..0d7ee9f
--- /dev/null
+++ b/cinder/files/liberty/api-paste.ini.volume.Debian
@@ -0,0 +1,70 @@
+#############
+# OpenStack #
+#############
+{%- from "cinder/map.jinja" import volume with context %}
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware.request_id:RequestId.factory
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+service_port=5000
+service_host={{ volume.identity.host }}
+service_protocol=http
+auth_host = {{ volume.identity.host }}
+auth_port = {{ volume.identity.port }}
+auth_protocol = http
+admin_tenant_name = {{ volume.identity.tenant }}
+admin_user = {{ volume.identity.user }}
+admin_password = {{ volume.identity.password }}
+auth_uri=http://{{ volume.identity.host }}:5000/{% if volume.identity.get('version', 2) == 3 %}v3{% endif %}
diff --git a/cinder/files/liberty/api-paste.ini.volume.RedHat b/cinder/files/liberty/api-paste.ini.volume.RedHat
new file mode 120000
index 0000000..c5204ec
--- /dev/null
+++ b/cinder/files/liberty/api-paste.ini.volume.RedHat
@@ -0,0 +1 @@
+api-paste.ini.volume.Debian
\ No newline at end of file
diff --git a/cinder/files/liberty/cinder.conf.controller.Debian b/cinder/files/liberty/cinder.conf.controller.Debian
new file mode 100644
index 0000000..4625b38
--- /dev/null
+++ b/cinder/files/liberty/cinder.conf.controller.Debian
@@ -0,0 +1,410 @@
+{%- from "cinder/map.jinja" import controller with context %}
+
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+#volume_group = cinder
+
+verbose = True
+
+auth_strategy = keystone
+
+state_path = /var/lib/cinder
+lock_path = /var/lock/cinder
+
+use_syslog=False
+
+glance_num_retries=0
+debug=False
+
+os_region_name={{ controller.identity.region }}
+
+#glance_api_ssl_compression=False
+#glance_api_insecure=False
+
+osapi_volume_listen={{ controller.osapi.host }}
+
+glance_host={{ controller.glance.host }}
+glance_port={{ controller.glance.port }}
+glance_api_version=2
+
+volume_backend_name=DEFAULT
+
+{%- if controller.backend is defined %}
+
+default_volume_type={{ controller.default_volume_type }}
+
+enabled_backends={% for backend_name, backend in controller.get('backend', {}).iteritems() %}{{ backend_name }}{% if not loop.last %},{% endif %}{% endfor %}
+
+{%- else %}
+
+default_volume_type={% for type in controller.get('types', []) %}{% if loop.first %}{{ type.name }}{% endif %}{% endfor %}
+
+enabled_backends={% for type in controller.get('types', []) %}{{ type.backend }}{% if not loop.last %},{% endif %}{% endfor %}{% if controller.storage.engine == "openvstorage" %}{% for vpool in pillar.openvstorage.server.get('vpools', []) %}{{ vpool }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
+
+{%- endif %}
+
+#RPC response timeout recommended by Hitachi
+rpc_response_timeout=3600
+
+#Rabbit
+rpc_backend=cinder.openstack.common.rpc.impl_kombu
+control_exchange=cinder
+
+
+volume_clear={{ controller.wipe_method }}
+
+{%- if controller.notification %}
+control_exchange = cinder
+notification_driver = messagingv2
+{%- endif %}
+
+
+#volume_group = vg_cinder_volume
+
+volumes_dir = /var/lib/cinder/volumes
+log_dir=/var/log/cinder
+
+[oslo_concurrency]
+
+lock_path=/var/lock/cinder
+
+[oslo_messaging_rabbit]
+rabbit_host = {{ controller.message_queue.host }}
+rabbit_port = {{ controller.message_queue.port }}
+rabbit_userid = {{ controller.message_queue.user }}
+rabbit_password = {{ controller.message_queue.password }}
+rabbit_virtual_host = {{ controller.message_queue.virtual_host }}
+rabbit_ha_queues={{ controller.message_queue.ha_queues }}
+kombu_reconnect_delay=5.0
+
+{%- if controller.identity.get('version', 2) == 2 %}
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-cinder
+admin_password={{ controller.identity.password }}
+auth_port={{ controller.identity.port }}
+auth_host={{ controller.identity.host }}
+admin_tenant_name={{ controller.identity.tenant }}
+auth_protocol=http
+admin_user=cinder
+signing_dirname=/tmp/keystone-signing-cinder
+auth_uri=http://{{ controller.identity.host }}:{{ controller.identity.port }}
+
+{%- else %}
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-cinder
+signing_dirname=/tmp/keystone-signing-cinder
+identity_uri = http://{{ controller.identity.host }}:35357/v3
+revocation_cache_time = 10
+auth_section = generic_password
+
+[generic_password]
+auth_plugin = password
+user_domain_id = {{ controller.identity.get('domain', 'default') }}
+project_domain_id = {{ controller.identity.get('domain', 'default') }}
+project_name = {{ controller.identity.tenant }}
+username = {{ controller.identity.user }}
+password = {{ controller.identity.password }}
+auth_uri=http://{{ controller.identity.host }}:5000/v3
+auth_url=http://{{ controller.identity.host }}:35357/v3
+
+{%- endif %}
+
+[database]
+idle_timeout=3600
+max_pool_size=30
+max_retries=-1
+max_overflow=40
+connection = {{ controller.database.engine }}://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}
+
+{# new way #}
+
+{%- if controller.backend is defined %}
+
+{%- for backend_name, backend in controller.get('backend', {}).iteritems() %}
+
+{%- set backend_fragment = "cinder/files/backend/_" + backend.engine + ".conf" %}
+{%- include backend_fragment %}
+
+{%- endfor %}
+
+{%- else %}
+
+{# old way #}
+
+{%- if controller.storage.engine == "storwize" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.backend }}]
+volume_driver = cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver
+volume_backend_name={{ type.backend }}
+san_ip={{ controller.storage.host }}
+san_ssh_port={{ controller.storage.port }}
+san_login={{ controller.storage.user }}
+san_password={{ controller.storage.password }}
+
+storwize_svc_volpool_name={{ type.pool }}
+#storwize_svc_connection_protocol=iSCSI
+storwize_svc_connection_protocol={{ controller.storage.connection }}
+#storwize_svc_iscsi_chap_enabled=true
+storwize_svc_multihost_enabled={{ controller.storage.multihost }}
+storwize_svc_multipath_enabled={{ controller.storage.multipath }}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if controller.storage.engine == "hitachi_vsp" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.backend }}]
+volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver
+volume_backend_name={{ type.backend }}
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+hitachi_serial_number=86644
+#hitachi_serial_number=355316
+
+# Name of an array unit (string value)
+#hitachi_unit_name=<None>
+#hitachi_unit_name=fiat
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+hitachi_pool_id=4
+
+# Thin pool ID of storage system (integer value)
+hitachi_thin_pool_id=14
+
+# Range of logical device of storage system (string value)
+#hitachi_ldev_range=<None>
+#hitachi_ldev_range=00:05:00-00:05:FF
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+hitachi_default_copy_method=THIN
+
+# Copy speed of storage system (integer value)
+#hitachi_copy_speed=3
+
+# Interval to check copy (integer value)
+#hitachi_copy_check_interval=3
+
+# Interval to check copy asynchronously (integer value)
+#hitachi_async_copy_check_interval=10
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+hitachi_target_ports=CL1-E,CL2-E,CL3-B,CL4-D
+
+# Range of group number (string value)
+#hitachi_group_range=
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+hitachi_group_request=True
+#hitachi_group_request=false
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_fc
+#
+# Request for FC Zone creating HostGroup (boolean value)
+hitachi_zoning_request=true
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_horcm
+#
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+hitachi_horcm_numbers=0,1
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+#hitachi_horcm_user=openstack
+hitachi_horcm_user={{ controller.storage.user }}
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+#hitachi_horcm_password=avg2014
+hitachi_horcm_password={{ controller.storage.password }}
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+hitachi_horcm_add_conf=true
+
+# Multipath for image transfer disabled per Hitachi's recommendation
+use_multipath_for_image_xfer=false
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if controller.storage.engine == "ceph" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.backend }}]
+
+volume_backend_name={{ type.backend }}
+volume_driver = cinder.volume.drivers.rbd.RBDDriver
+#
+# Options defined in cinder.volume.drivers.rbd
+#
+# The RADOS pool where rbd volumes are stored (string value)
+#rbd_pool=volumes
+rbd_pool={{ type.pool }}
+
+# The RADOS client name for accessing rbd volumes - only set
+# when using cephx authentication (string value)
+#rbd_user=cinder
+rbd_user={{ controller.storage.user }}
+
+# Path to the ceph configuration file (string value)
+#rbd_ceph_conf=
+rbd_ceph_conf=/etc/ceph/ceph.conf
+
+# Flatten volumes created from snapshots to remove dependency
+# from volume to snapshot (boolean value)
+#rbd_flatten_volume_from_snapshot=false
+
+# The libvirt uuid of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=da74ccb7-aa59-1721-a172-0006b1aa4e3e
+rbd_secret_uuid={{ controller.storage.secret_uuid }}
+
+# Directory where temporary image files are stored when the
+# volume driver does not write them directly to the volume.
+# (string value)
+#volume_tmp_dir=<None>
+
+# Maximum number of nested volume clones that are taken before
+# a flatten occurs. Set to 0 to disable cloning. (integer
+# value)
+#rbd_max_clone_depth=5
+
+# Volumes will be chunked into objects of this size (in
+# megabytes). (integer value)
+#rbd_store_chunk_size=4
+
+# Timeout value (in seconds) used when connecting to ceph
+# cluster. If value < 0, no timeout is set and default
+# librados value is used. (integer value)
+#rados_connect_timeout=-1
+
+{%- endfor %}
+
+{%- endif %}
+
+
+{%- if controller.storage.engine == "hp3par" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.backend }}]
+
+hp3par_api_url={{ controller.storage.url }}
+
+# 3PAR Super user username
+hp3par_username={{ controller.storage.user }}
+
+# 3PAR Super user password
+hp3par_password={{ controller.storage.password }}
+
+# 3PAR CPG to use for volume creation
+hp3par_cpg={{ controller.storage.cpg }}
+
+# IP address of SAN controller for SSH access to the array
+san_ip={{ controller.storage.host }}
+
+# Username for SAN controller for SSH access to the array
+san_login={{ controller.storage.login }}
+
+# Password for SAN controller for SSH access to the array
+san_password={{ controller.storage.password }}
+
+# FIBRE CHANNEL (uncomment the next line to enable the FC driver)
+volume_driver=cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver
+
+# iSCSI (uncomment the next line to enable the iSCSI driver and
+# hp3par_iscsi_ips or iscsi_ip_address)
+#volume_driver=cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver
+
+# iSCSI multiple port configuration
+# hp3par_iscsi_ips=10.10.220.253:3261,10.10.222.234
+#hp3par_iscsi_ips=10.10.103.151
+
+# Still available for single port iSCSI configuration
+#iscsi_ip_address=10.10.103.151
+
+## OPTIONAL SETTINGS
+# Enable HTTP debugging to 3PAR
+# hp3par_debug=True
+hp3par_debug={{ controller.storage.debug }}
+
+# Enable CHAP authentication for iSCSI connections.
+hp3par_iscsi_chap_enabled=false
+
+# The CPG to use for Snapshots for volumes. If empty hp3par_cpg will be used.
+hp3par_snap_cpg={{ controller.storage.snapcpg }}
+
+# Time in hours to retain a snapshot. You can't delete it before this expires.
+hp3par_snapshot_retention=2
+
+# Time in hours when a snapshot expires and is deleted. This must be larger than retention.
+hp3par_snapshot_expiration=4
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if controller.storage.engine == "fujitsu" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.backend }}]
+volume_backend_name={{ type.backend }}
+volume_driver=cinder.volume.drivers.fujitsu.fujitsu_eternus_dx_fc.FJDXFCDriver
+cinder_eternus_config_file=/etc/cinder/cinder_fujitsu_eternus_dx_{{ type.backend }}.xml
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if controller.storage.engine == "gpfs" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.backend }}]
+volume_backend_name={{ type.backend }}
+volume_driver = cinder.volume.drivers.ibm.gpfs.GPFSDriver
+gpfs_mount_point_base={{ type.mount_point }}
+#gpfs_mount_point_base=/mnt/gpfs-openstack/cinder/gold
+gpfs_max_clone_depth=3
+gpfs_sparse_volumes=true
+gpfs_storage_pool=system
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- endif %}
diff --git a/cinder/files/liberty/cinder.conf.controller.RedHat b/cinder/files/liberty/cinder.conf.controller.RedHat
new file mode 120000
index 0000000..563de88
--- /dev/null
+++ b/cinder/files/liberty/cinder.conf.controller.RedHat
@@ -0,0 +1 @@
+cinder.conf.controller.Debian
\ No newline at end of file
diff --git a/cinder/files/liberty/cinder.conf.volume.Debian b/cinder/files/liberty/cinder.conf.volume.Debian
new file mode 100644
index 0000000..5ec2e6b
--- /dev/null
+++ b/cinder/files/liberty/cinder.conf.volume.Debian
@@ -0,0 +1,486 @@
+{%- from "cinder/map.jinja" import volume with context %}
+
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+#volume_group = cinder
+
+verbose = True
+
+auth_strategy = keystone
+
+state_path = /var/lib/cinder
+lock_path = /var/lock/cinder
+
+use_syslog=False
+
+glance_num_retries=0
+debug=False
+
+os_region_name={{ volume.identity.region }}
+
+#glance_api_ssl_compression=False
+#glance_api_insecure=False
+
+osapi_volume_listen={{ volume.osapi.host }}
+
+glance_host={{ volume.glance.host }}
+glance_port={{ volume.glance.port }}
+glance_api_version=2
+
+volume_backend_name=DEFAULT
+
+{%- if volume.backend is defined %}
+
+default_volume_type={{ volume.default_volume_type }}
+
+enabled_backends={% for backend_name, backend in volume.get('backend', {}).iteritems() %}{{ backend_name }}{% if not loop.last %},{% endif %}{% endfor %}
+
+{%- else %}
+
+default_volume_type={% for type in volume.get('types', []) %}{% if loop.first %}{{ type.name }}{% endif %}{% endfor %}
+
+enabled_backends={% for type in volume.get('types', []) %}{{ type.backend }}{% if not loop.last %},{% endif %}{% endfor %}{% if volume.storage.engine == "openvstorage" %}{% for vpool in pillar.openvstorage.server.get('vpools', []) %}{{ vpool }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
+
+{%- endif %}
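+
+# Hypothetical rendering of the branch above for two volume types named
+# "fast" and "slow" with backends "fast-ceph" and "slow-ceph":
+#   default_volume_type=fast
+#   enabled_backends=fast-ceph,slow-ceph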
+
+#RPC response timeout recommended by Hitachi
+rpc_response_timeout=3600
+
+#Rabbit
+rpc_backend=cinder.openstack.common.rpc.impl_kombu
+control_exchange=cinder
+
+
+volume_clear={{ volume.wipe_method }}
+
+{%- if volume.notification %}
+control_exchange = cinder
+
+{%- if volume.notification.driver is defined %}
+notification_driver = {{ volume.notification.driver }}
+{%- else %}
+notification_driver = messagingv2
+{%- endif %}
+
+{%- if volume.notification.topics is defined %}
+notification_topics = {{ volume.notification.topics }}
+{%- endif %}
+
+{%- endif %}
+
+volume_name_template = volume-%s
+
+#volume_group = vg_cinder_volume
+
+volumes_dir = /var/lib/cinder/volumes
+log_dir=/var/log/cinder
+
+# Use syslog for logging. (boolean value)
+#use_syslog=false
+
+use_syslog=false
+verbose=True
+
+[oslo_concurrency]
+
+lock_path=/var/lock/cinder
+
+[oslo_messaging_rabbit]
+
+rabbit_host = {{ volume.message_queue.host }}
+rabbit_port = {{ volume.message_queue.port }}
+rabbit_userid = {{ volume.message_queue.user }}
+rabbit_password = {{ volume.message_queue.password }}
+rabbit_virtual_host = {{ volume.message_queue.virtual_host }}
+rabbit_ha_queues={{ volume.message_queue.ha_queues }}
+kombu_reconnect_delay=5.0
+
+{%- if volume.identity.get('version', 2) == 2 %}
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-cinder
+admin_password={{ volume.identity.password }}
+auth_port={{ volume.identity.port }}
+auth_host={{ volume.identity.host }}
+admin_tenant_name={{ volume.identity.tenant }}
+auth_protocol=http
+admin_user=cinder
+signing_dirname=/tmp/keystone-signing-cinder
+auth_uri=http://{{ volume.identity.host }}:{{ volume.identity.port }}
+
+{%- else %}
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-cinder
+signing_dirname=/tmp/keystone-signing-cinder
+identity_uri = http://{{ volume.identity.host }}:35357/v3
+revocation_cache_time = 10
+auth_section = generic_password
+
+[generic_password]
+auth_plugin = password
+user_domain_id = {{ volume.identity.get('domain', 'default') }}
+project_domain_id = {{ volume.identity.get('domain', 'default') }}
+project_name = {{ volume.identity.tenant }}
+username = {{ volume.identity.user }}
+password = {{ volume.identity.password }}
+auth_uri=http://{{ volume.identity.host }}:5000/v3
+auth_url=http://{{ volume.identity.host }}:35357/v3
+
+{%- endif %}
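+
+# Hypothetical rendering of the v3 branch above, assuming identity host
+# 10.0.0.10 and tenant "service":
+#   [generic_password]
+#   auth_plugin = password
+#   project_name = service
+#   auth_uri=http://10.0.0.10:5000/v3
+#   auth_url=http://10.0.0.10:35357/v3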
+
+[database]
+idle_timeout=3600
+max_pool_size=30
+max_retries=-1
+max_overflow=40
+connection = {{ volume.database.engine }}://{{ volume.database.user }}:{{ volume.database.password }}@{{ volume.database.host }}/{{ volume.database.name }}
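+# Example rendered SQLAlchemy URL (hypothetical credentials):
+#   connection = mysql://cinder:secret@10.0.0.10/cinder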
+
+{# new way: backends defined as a map under cinder:volume:backend #}
+
+{%- if volume.backend is defined %}
+
+{%- for backend_name, backend in volume.get('backend', {}).iteritems() %}
+
+{%- set backend_fragment = "cinder/files/backend/_" + backend.engine + ".conf" %}
+{%- include backend_fragment %}
+
+{%- endfor %}
+
+{%- else %}
+
+{# old way: a single cinder:volume:storage engine with a list of types #}
+
+{%- if volume.storage.engine == "storwize" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.backend }}]
+volume_driver = cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver
+volume_backend_name={{ type.backend }}
+san_ip={{ volume.storage.host }}
+san_ssh_port={{ volume.storage.port }}
+san_login={{ volume.storage.user }}
+san_password={{ volume.storage.password }}
+
+storwize_svc_volpool_name={{ type.pool }}
+#storwize_svc_connection_protocol=iSCSI
+storwize_svc_connection_protocol={{ volume.storage.connection }}
+#storwize_svc_iscsi_chap_enabled=true
+storwize_svc_multihost_enabled={{ volume.storage.multihost }}
+storwize_svc_multipath_enabled={{ volume.storage.multipath }}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == "hitachi_vsp" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.backend }}]
+
+{%- if volume.storage.version == "1.0" %}
+
+volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver
+volume_backend_name={{ type.backend }}
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+hitachi_serial_number=86644
+#hitachi_serial_number=355316
+
+# Name of an array unit (string value)
+#hitachi_unit_name=<None>
+#hitachi_unit_name=fiat
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+hitachi_pool_id=4
+
+# Thin pool ID of storage system (integer value)
+hitachi_thin_pool_id=13
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+hitachi_default_copy_method=THIN
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+hitachi_target_ports=CL1-E,CL2-E,CL3-B,CL4-D
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+hitachi_group_request=True
+#hitachi_group_request=false
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_fc
+#
+# Request for FC Zone creating HostGroup (boolean value)
+hitachi_zoning_request=true
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_horcm
+#
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+hitachi_horcm_numbers=0,1
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+#hitachi_horcm_user=openstack
+hitachi_horcm_user=root
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+#hitachi_horcm_password=avg2014
+hitachi_horcm_password=X3tT35va
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+hitachi_horcm_add_conf=true
+
+#hitachi multipath advice
+use_multipath_for_image_xfer=false
+
+{%- endif %}
+
+{%- if volume.storage.version == "1.3" %}
+
+volume_driver = cinder.volume.drivers.hitachi.hbsd.hbsd_fc.HBSDFCDriver
+volume_backend_name={{ type.backend }}
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+#hitachi_serial_number=86644
+hitachi_storage_id=86644
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+hitachi_pool=4
+
+# Thin pool ID of storage system (integer value)
+hitachi_thin_pool=13
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+hitachi_default_copy_method=THIN
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+hitachi_target_ports=CL3-B
+
+hitachi_compute_target_ports=CL1-E,CL2-E,CL3-B,CL4-D
+
+# Range of group number (string value)
+#hitachi_group_range=
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+hitachi_group_request=True
+#hitachi_group_request=false
+
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+hitachi_horcm_numbers=0,1
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+#hitachi_horcm_user=openstack
+hitachi_horcm_user=root
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+#hitachi_horcm_password=avg2014
+hitachi_horcm_password=X3tT35va
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+hitachi_horcm_add_conf=true
+
+#hitachi multipath advice
+use_multipath_for_image_xfer=false
+
+hitachi_storage_cli=HORCM
+
+
+{%- endif %}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == "ceph" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.backend }}]
+volume_backend_name={{ type.backend }}
+volume_driver = cinder.volume.drivers.rbd.RBDDriver
+#
+# Options defined in cinder.volume.drivers.rbd
+#
+# The RADOS pool where rbd volumes are stored (string value)
+#rbd_pool=volumes
+rbd_pool={{ type.pool }}
+
+# The RADOS client name for accessing rbd volumes - only set
+# when using cephx authentication (string value)
+#rbd_user=cinder
+rbd_user={{ volume.storage.user }}
+
+# Path to the ceph configuration file (string value)
+#rbd_ceph_conf=
+rbd_ceph_conf=/etc/ceph/ceph.conf
+
+# Flatten volumes created from snapshots to remove dependency
+# from volume to snapshot (boolean value)
+#rbd_flatten_volume_from_snapshot=false
+
+# The libvirt uuid of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=da74ccb7-aa59-1721-a172-0006b1aa4e3e
+rbd_secret_uuid={{ volume.storage.secret_uuid }}
+
+# Directory where temporary image files are stored when the
+# volume driver does not write them directly to the volume.
+# (string value)
+#volume_tmp_dir=<None>
+
+# Maximum number of nested volume clones that are taken before
+# a flatten occurs. Set to 0 to disable cloning. (integer
+# value)
+#rbd_max_clone_depth=5
+
+# Volumes will be chunked into objects of this size (in
+# megabytes). (integer value)
+#rbd_store_chunk_size=4
+
+# Timeout value (in seconds) used when connecting to ceph
+# cluster. If value < 0, no timeout is set and default
+# librados value is used. (integer value)
+#rados_connect_timeout=-1
+
+{%- endfor %}
+
+{%- endif %}
+
+
+{%- if volume.storage.engine == "hp3par" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.backend }}]
+
+hp3par_api_url={{ volume.storage.url }}
+
+# 3PAR Super user username
+hp3par_username={{ volume.storage.user }}
+
+# 3PAR Super user password
+hp3par_password={{ volume.storage.password }}
+
+# 3PAR CPG to use for volume creation
+hp3par_cpg={{ volume.storage.cpg }}
+
+# IP address of SAN controller for SSH access to the array
+san_ip={{ volume.storage.host }}
+
+# Username for SAN controller for SSH access to the array
+san_login={{ volume.storage.login }}
+
+# Password for SAN controller for SSH access to the array
+san_password={{ volume.storage.password }}
+
+# FIBRE CHANNEL (uncomment the next line to enable the FC driver)
+volume_driver=cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver
+
+# iSCSI (uncomment the next line to enable the iSCSI driver and
+# hp3par_iscsi_ips or iscsi_ip_address)
+#volume_driver=cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver
+
+# iSCSI multiple port configuration
+# hp3par_iscsi_ips=10.10.220.253:3261,10.10.222.234
+#hp3par_iscsi_ips=10.10.103.151
+
+# Still available for single port iSCSI configuration
+#iscsi_ip_address=10.10.103.151
+
+## OPTIONAL SETTINGS
+# Enable HTTP debugging to 3PAR
+# hp3par_debug=True
+hp3par_debug={{ volume.storage.debug }}
+
+# Enable CHAP authentication for iSCSI connections.
+hp3par_iscsi_chap_enabled=false
+
+# The CPG to use for Snapshots for volumes. If empty hp3par_cpg will be used.
+hp3par_snap_cpg={{ volume.storage.snapcpg }}
+
+# Time in hours to retain a snapshot. You can't delete it before this expires.
+hp3par_snapshot_retention=2
+
+# Time in hours when a snapshot expires and is deleted. This must be larger than retention.
+hp3par_snapshot_expiration=4
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == "openvstorage" %}
+
+{%- for vpool in pillar.openvstorage.server.get('vpools', []) %}
+
+[{{ vpool }}]
+volume_driver = cinder.volume.drivers.openvstorage.OVSVolumeDriver
+volume_backend_name={{ vpool }}
+vpool_name={{ vpool }}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == "fujitsu" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.backend }}]
+volume_backend_name={{ type.backend }}
+volume_driver=cinder.volume.drivers.fujitsu.fujitsu_eternus_dx_fc.FJDXFCDriver
+cinder_eternus_config_file=/etc/cinder/cinder_fujitsu_eternus_dx_{{ type.backend }}.xml
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == "gpfs" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.backend }}]
+volume_backend_name={{ type.backend }}
+volume_driver = cinder.volume.drivers.ibm.gpfs.GPFSDriver
+gpfs_mount_point_base={{ type.mount_point }}
+#gpfs_mount_point_base=/mnt/gpfs-openstack/cinder/gold
+gpfs_max_clone_depth=3
+gpfs_sparse_volumes=true
+gpfs_storage_pool=system
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- endif %}
diff --git a/cinder/files/liberty/cinder.conf.volume.RedHat b/cinder/files/liberty/cinder.conf.volume.RedHat
new file mode 120000
index 0000000..df997ca
--- /dev/null
+++ b/cinder/files/liberty/cinder.conf.volume.RedHat
@@ -0,0 +1 @@
+cinder.conf.volume.Debian
\ No newline at end of file
diff --git a/cinder/files/liberty/cinder_fujitsu_eternus_dx.xml b/cinder/files/liberty/cinder_fujitsu_eternus_dx.xml
new file mode 100644
index 0000000..1af35fa
--- /dev/null
+++ b/cinder/files/liberty/cinder_fujitsu_eternus_dx.xml
@@ -0,0 +1,22 @@
+{%- from "cinder/map.jinja" import volume with context %}
+{%- if volume.storage.engine is defined -%}
+<?xml version='1.0' encoding='UTF-8'?>
+<FUJITSU>
+<EternusIP>{{ volume.storage.host }}</EternusIP>
+<EternusPort>{{ volume.storage.port }}</EternusPort>
+<EternusUser>{{ volume.storage.user }}</EternusUser>
+<EternusPassword>{{ volume.storage.password }}</EternusPassword>
+<EternusISCSIIP></EternusISCSIIP>
+<EternusPool>{{ volume_type_name }}</EternusPool>
+</FUJITSU>
+{%- else -%}
+<?xml version='1.0' encoding='UTF-8'?>
+<FUJITSU>
+<EternusIP>{{ volume.backend.host }}</EternusIP>
+<EternusPort>{{ volume.backend.port }}</EternusPort>
+<EternusUser>{{ volume.backend.user }}</EternusUser>
+<EternusPassword>{{ volume.backend.password }}</EternusPassword>
+<EternusISCSIIP></EternusISCSIIP>
+<EternusPool>{{ backend.pool }}</EternusPool>
+</FUJITSU>
+{%- endif %}
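+<!-- Hypothetical rendering of the first branch (abridged), assuming host
+     10.0.0.20, port 5988, user "smisuser" and volume_type_name "10kThinPro":
+     <EternusIP>10.0.0.20</EternusIP>
+     <EternusPort>5988</EternusPort>
+     <EternusUser>smisuser</EternusUser>
+     <EternusPool>10kThinPro</EternusPool> -->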
diff --git a/cinder/files/mitaka/api-paste.ini.controller.Debian b/cinder/files/mitaka/api-paste.ini.controller.Debian
new file mode 100644
index 0000000..5914d81
--- /dev/null
+++ b/cinder/files/mitaka/api-paste.ini.controller.Debian
@@ -0,0 +1,75 @@
+#############
+# OpenStack #
+#############
+
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+/v3: openstack_volume_api_v3
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = cors request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = cors request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+
+[composite:openstack_volume_api_v3]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = cors request_id faultwrap sizelimit osprofiler noauth apiv3
+keystone = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3
+keystone_nolimit = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3
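+
+# cinder.api.middleware.auth:pipeline_factory selects one of the pipelines
+# above based on auth_strategy in cinder.conf; with auth_strategy=keystone a
+# request passes through authtoken and keystonecontext before reaching apiv3.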
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware.request_id:RequestId.factory
+
+[filter:cors]
+paste.filter_factory = oslo_middleware.cors:filter_factory
+oslo_config_project = cinder
+latent_allow_headers = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-ID, X-Trace-Info, X-Trace-HMAC, OpenStack-Volume-microversion
+latent_expose_headers = X-Auth-Token, X-Subject-Token, X-Service-Token, X-OpenStack-Request-ID, OpenStack-Volume-microversion
+latent_allow_methods = GET, PUT, POST, DELETE, PATCH
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[app:apiv3]
+paste.app_factory = cinder.api.v3.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = cors faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
diff --git a/cinder/files/mitaka/api-paste.ini.controller.RedHat b/cinder/files/mitaka/api-paste.ini.controller.RedHat
new file mode 120000
index 0000000..341baca
--- /dev/null
+++ b/cinder/files/mitaka/api-paste.ini.controller.RedHat
@@ -0,0 +1 @@
+api-paste.ini.controller.Debian
\ No newline at end of file
diff --git a/cinder/files/mitaka/api-paste.ini.volume.Debian b/cinder/files/mitaka/api-paste.ini.volume.Debian
new file mode 100644
index 0000000..5914d81
--- /dev/null
+++ b/cinder/files/mitaka/api-paste.ini.volume.Debian
@@ -0,0 +1,75 @@
+#############
+# OpenStack #
+#############
+
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+/v3: openstack_volume_api_v3
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = cors request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = cors request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+
+[composite:openstack_volume_api_v3]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = cors request_id faultwrap sizelimit osprofiler noauth apiv3
+keystone = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3
+keystone_nolimit = cors request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware.request_id:RequestId.factory
+
+[filter:cors]
+paste.filter_factory = oslo_middleware.cors:filter_factory
+oslo_config_project = cinder
+latent_allow_headers = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-ID, X-Trace-Info, X-Trace-HMAC, OpenStack-Volume-microversion
+latent_expose_headers = X-Auth-Token, X-Subject-Token, X-Service-Token, X-OpenStack-Request-ID, OpenStack-Volume-microversion
+latent_allow_methods = GET, PUT, POST, DELETE, PATCH
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[app:apiv3]
+paste.app_factory = cinder.api.v3.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = cors faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
diff --git a/cinder/files/mitaka/api-paste.ini.volume.RedHat b/cinder/files/mitaka/api-paste.ini.volume.RedHat
new file mode 120000
index 0000000..c5204ec
--- /dev/null
+++ b/cinder/files/mitaka/api-paste.ini.volume.RedHat
@@ -0,0 +1 @@
+api-paste.ini.volume.Debian
\ No newline at end of file
diff --git a/cinder/files/mitaka/cinder.conf.controller.Debian b/cinder/files/mitaka/cinder.conf.controller.Debian
new file mode 100644
index 0000000..7f508fe
--- /dev/null
+++ b/cinder/files/mitaka/cinder.conf.controller.Debian
@@ -0,0 +1,118 @@
+{%- from "cinder/map.jinja" import controller with context %}
+
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+#volume_group = cinder
+
+verbose = True
+
+auth_strategy = keystone
+
+state_path = /var/lib/cinder
+
+use_syslog=False
+
+glance_num_retries=0
+debug=False
+
+os_region_name={{ controller.identity.region }}
+
+#glance_api_ssl_compression=False
+#glance_api_insecure=False
+
+osapi_volume_listen={{ controller.osapi.host }}
+
+glance_host={{ controller.glance.host }}
+glance_port={{ controller.glance.port }}
+glance_api_version=2
+
+volume_backend_name=DEFAULT
+
+{%- if controller.backend is defined %}
+
+default_volume_type={{ controller.default_volume_type }}
+
+enabled_backends={% for backend_name, backend in controller.get('backend', {}).iteritems() %}{{ backend_name }}{% if not loop.last %},{% endif %}{% endfor %}
+
+{%- endif %}
+
+#RPC response timeout recommended by Hitachi
+rpc_response_timeout=3600
+
+#Rabbit
+rpc_backend=cinder.openstack.common.rpc.impl_kombu
+control_exchange=cinder
+
+
+volume_clear={{ controller.wipe_method }}
+
+
+
+volume_name_template = volume-%s
+
+#volume_group = vg_cinder_volume
+
+volumes_dir = /var/lib/cinder/volumes
+log_dir=/var/log/cinder
+
+# Use syslog for logging. (boolean value)
+#use_syslog=false
+
+use_syslog=false
+verbose=True
+lock_path=/var/lock/cinder
+
+[oslo_messaging_notifications]
+{%- if controller.notification %}
+driver = messagingv2
+{%- endif %}
+
+[oslo_concurrency]
+
+lock_path=/var/lock/cinder
+
+[oslo_messaging_rabbit]
+rabbit_host = {{ controller.message_queue.host }}
+rabbit_port = {{ controller.message_queue.port }}
+rabbit_userid = {{ controller.message_queue.user }}
+rabbit_password = {{ controller.message_queue.password }}
+rabbit_virtual_host = {{ controller.message_queue.virtual_host }}
+rabbit_ha_queues={{ controller.message_queue.ha_queues }}
+kombu_reconnect_delay=5.0
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-cinder
+revocation_cache_time = 10
+auth_type = password
+user_domain_id = {{ controller.identity.get('domain', 'default') }}
+project_domain_id = {{ controller.identity.get('domain', 'default') }}
+project_name = {{ controller.identity.tenant }}
+username = {{ controller.identity.user }}
+password = {{ controller.identity.password }}
+auth_uri=http://{{ controller.identity.host }}:5000
+auth_url=http://{{ controller.identity.host }}:35357
+{%- if controller.cache is defined %}
+memcached_servers={%- for member in controller.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
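+
+# Hypothetical rendering with three cache members:
+#   memcached_servers=10.0.0.11:11211,10.0.0.12:11211,10.0.0.13:11211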
+
+[database]
+idle_timeout=3600
+max_pool_size=30
+max_retries=-1
+max_overflow=40
+connection = {{ controller.database.engine }}+pymysql://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}
+
+{%- if controller.backend is defined %}
+
+{%- for backend_name, backend in controller.get('backend', {}).iteritems() %}
+
+{%- set backend_fragment = "cinder/files/backend/_" + backend.engine + ".conf" %}
+{%- include backend_fragment %}
+
+{%- endfor %}
+
+{%- endif %}
diff --git a/cinder/files/mitaka/cinder.conf.controller.RedHat b/cinder/files/mitaka/cinder.conf.controller.RedHat
new file mode 120000
index 0000000..563de88
--- /dev/null
+++ b/cinder/files/mitaka/cinder.conf.controller.RedHat
@@ -0,0 +1 @@
+cinder.conf.controller.Debian
\ No newline at end of file
diff --git a/cinder/files/mitaka/cinder.conf.volume.Debian b/cinder/files/mitaka/cinder.conf.volume.Debian
new file mode 100644
index 0000000..91faf84
--- /dev/null
+++ b/cinder/files/mitaka/cinder.conf.volume.Debian
@@ -0,0 +1,117 @@
+{%- from "cinder/map.jinja" import volume with context %}
+
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+#volume_group = cinder
+
+verbose = True
+
+auth_strategy = keystone
+
+state_path = /var/lib/cinder
+
+use_syslog=False
+
+glance_num_retries=0
+debug=False
+
+os_region_name={{ volume.identity.region }}
+
+#glance_api_ssl_compression=False
+#glance_api_insecure=False
+
+osapi_volume_listen={{ volume.osapi.host }}
+
+glance_host={{ volume.glance.host }}
+glance_port={{ volume.glance.port }}
+glance_api_version=2
+
+volume_backend_name=DEFAULT
+
+{%- if volume.backend is defined %}
+
+default_volume_type={{ volume.default_volume_type }}
+
+enabled_backends={% for backend_name, backend in volume.get('backend', {}).iteritems() %}{{ backend_name }}{% if not loop.last %},{% endif %}{% endfor %}
+
+{%- endif %}
+
+#RPC response timeout recommended by Hitachi
+rpc_response_timeout=3600
+
+#Rabbit
+rpc_backend=cinder.openstack.common.rpc.impl_kombu
+control_exchange=cinder
+
+
+volume_clear={{ volume.wipe_method }}
+
+
+volume_name_template = volume-%s
+
+#volume_group = vg_cinder_volume
+
+volumes_dir = /var/lib/cinder/volumes
+log_dir=/var/log/cinder
+
+# Use syslog for logging. (boolean value)
+#use_syslog=false
+
+use_syslog=false
+verbose=True
+
+[oslo_messaging_notifications]
+{%- if volume.notification %}
+driver = messagingv2
+{%- endif %}
+
+[oslo_concurrency]
+
+lock_path=/var/lock/cinder
+
+[oslo_messaging_rabbit]
+
+rabbit_host = {{ volume.message_queue.host }}
+rabbit_port = {{ volume.message_queue.port }}
+rabbit_userid = {{ volume.message_queue.user }}
+rabbit_password = {{ volume.message_queue.password }}
+rabbit_virtual_host = {{ volume.message_queue.virtual_host }}
+rabbit_ha_queues={{ volume.message_queue.ha_queues }}
+kombu_reconnect_delay=5.0
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-cinder
+revocation_cache_time = 10
+auth_type = password
+user_domain_id = {{ volume.identity.get('domain', 'default') }}
+project_domain_id = {{ volume.identity.get('domain', 'default') }}
+project_name = {{ volume.identity.tenant }}
+username = {{ volume.identity.user }}
+password = {{ volume.identity.password }}
+auth_uri=http://{{ volume.identity.host }}:5000/v3
+auth_url=http://{{ volume.identity.host }}:35357/v3
+{%- if volume.cache is defined %}
+memcached_servers={%- for member in volume.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+[database]
+idle_timeout=3600
+max_pool_size=30
+max_retries=-1
+max_overflow=40
+connection = {{ volume.database.engine }}+pymysql://{{ volume.database.user }}:{{ volume.database.password }}@{{ volume.database.host }}/{{ volume.database.name }}
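+# Unlike the liberty template, the engine is suffixed with +pymysql to select
+# the PyMySQL DBAPI driver, e.g. (hypothetical credentials):
+#   connection = mysql+pymysql://cinder:secret@10.0.0.10/cinder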
+
+{%- if volume.backend is defined %}
+
+{%- for backend_name, backend in volume.get('backend', {}).iteritems() %}
+
+{%- set backend_fragment = "cinder/files/backend/_" + backend.engine + ".conf" %}
+{%- include backend_fragment %}
+
+{%- endfor %}
+
+{%- endif %}
\ No newline at end of file
diff --git a/cinder/files/mitaka/cinder.conf.volume.RedHat b/cinder/files/mitaka/cinder.conf.volume.RedHat
new file mode 120000
index 0000000..df997ca
--- /dev/null
+++ b/cinder/files/mitaka/cinder.conf.volume.RedHat
@@ -0,0 +1 @@
+cinder.conf.volume.Debian
\ No newline at end of file
diff --git a/cinder/init.sls b/cinder/init.sls
new file mode 100644
index 0000000..967b134
--- /dev/null
+++ b/cinder/init.sls
@@ -0,0 +1,8 @@
+
+include:
+{% if pillar.cinder.controller is defined %}
+- cinder.controller
+{% endif %}
+{% if pillar.cinder.volume is defined %}
+- cinder.volume
+{% endif %}
\ No newline at end of file
diff --git a/cinder/map.jinja b/cinder/map.jinja
new file mode 100644
index 0000000..d2282cb
--- /dev/null
+++ b/cinder/map.jinja
@@ -0,0 +1,30 @@
+
+{% set controller = salt['grains.filter_by']({
+    'Debian': {
+        'pkgs': ['cinder-api', 'cinder-scheduler', 'lvm2', 'python-cinder'],
+        'services': ['cinder-api', 'cinder-scheduler'],
+        'wipe_method': 'none',
+        'notification': False,
+    },
+    'RedHat': {
+        'pkgs': ['openstack-cinder', 'python-cinder', 'lvm2'],
+        'services': ['openstack-cinder-api', 'openstack-cinder-scheduler'],
+        'wipe_method': 'none',
+        'notification': False,
+    },
+}, merge=salt['pillar.get']('cinder:controller')) %}
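+{#- grains.filter_by matches the minion's os_family grain against the keys
+    above and deep-merges the cinder:controller pillar over those defaults,
+    so a pillar value such as cinder:controller:wipe_method: zero overrides
+    the 'none' default. -#}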
+
+{% set volume = salt['grains.filter_by']({
+    'Debian': {
+        'pkgs': ['cinder-volume', 'lvm2', 'sysfsutils', 'sg3-utils', 'python-cinder','python-mysqldb','p7zip'],
+        'services': ['cinder-volume'],
+        'wipe_method': 'none',
+        'notification': False,
+    },
+    'RedHat': {
+        'pkgs': ['openstack-cinder', 'python-cinder', 'lvm2', 'sysfsutils', 'sg3_utils', 'device-mapper-multipath', 'device-mapper-multipath-libs'],
+        'services': ['openstack-cinder-volume'],
+        'wipe_method': 'none',
+        'notification': False,
+    },
+}, merge=salt['pillar.get']('cinder:volume')) %}
\ No newline at end of file
diff --git a/cinder/meta/sensu.yml b/cinder/meta/sensu.yml
new file mode 100644
index 0000000..a418e13
--- /dev/null
+++ b/cinder/meta/sensu.yml
@@ -0,0 +1,31 @@
+check:
+  local_cinder_api_proc:
+    command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C cinder-api -u cinder -c 1:30"
+    interval: 60
+    occurrences: 1
+    subscribers:
+    - local-cinder-controller
+  local_cinder_scheduler_proc:
+    command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C cinder-schedule -u cinder -c 1:5"
+    interval: 60
+    occurrences: 1
+    subscribers:
+    - local-cinder-controller
+  local_cinder_volume_proc:
+    command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C cinder-volume -u cinder -c 1:5"
+    interval: 60
+    occurrences: 1
+    subscribers:
+    - local-cinder-volume
+  remote_openstack_cinder_api:
+    command: "PATH=$PATH:/usr/local/bin oschecks-check_cinder_api --os_auth_url='http://:::openstack.host:::::::openstack.port:::/v2.0' --os_username=:::openstack.user::: --os_password=':::openstack.password:::' --os_tenant_name=:::openstack.tenant"
+    interval: 60
+    occurrences: 1
+    subscribers:
+    - remote-network
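+  # The :::name::: tokens in the remote checks are placeholders, expected to
+  # be substituted with concrete connection values by the monitoring formula
+  # that collects these check definitions.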
+  remote_openstack_cinder_volume:
+    command: "PATH=$PATH:/usr/local/bin oschecks-check_cinder_volume --auth_url='http://:::openstack.host:::::::openstack.port:::/v2.0' --username :::openstack.user::: --password :::openstack.password::: --tenant :::openstack.tenant:::"
+    interval: 300
+    occurrences: 1
+    subscribers:
+    - remote-network
\ No newline at end of file
diff --git a/cinder/meta/sphinx.yml b/cinder/meta/sphinx.yml
new file mode 100644
index 0000000..eebde48
--- /dev/null
+++ b/cinder/meta/sphinx.yml
@@ -0,0 +1,89 @@
+doc:
+  name: Cinder
+  description: Cinder provides an infrastructure for managing volumes in OpenStack. It originated as the Nova component nova-volume and has been an independent project since the Folsom release.
+  role:
+  {%- if pillar.cinder.controller is defined %}
+  {%- from "cinder/map.jinja" import controller with context %}
+    controller:
+      name: controller
+      endpoint:
+        cinder_api:
+          name: cinder-api
+          type: cinder-api
+          address: http://{{ controller.osapi.host }}:8776
+          protocol: http
+      param:
+        bind:
+          value: {{ controller.osapi.host }}:8776
+        version:
+          name: "Version"
+          value: {{ controller.version }}
+        glance_host:
+          name: "Glance service"
+          value: {{ controller.glance.host }}:{{ controller.glance.port }}
+        database_host:
+          name: "Database"
+          value: {{ controller.database.user }}@{{ controller.database.host }}:{{ controller.database.port }}//{{ controller.database.name }}
+        message_queue_ip:
+          name: "Message queue"
+          value: {{ controller.message_queue.user }}@{{ controller.message_queue.host }}:{{ controller.message_queue.port }}{{ controller.message_queue.virtual_host }}
+        identity_host:
+          name: "Identity service"
+          value: {{ controller.identity.user }}@{{ controller.identity.host }}:{{ controller.identity.port }}
+        packages:
+          value: |
+            {%- for pkg in controller.pkgs %}
+            {%- set pkg_version = "dpkg -l "+pkg+" | grep "+pkg+" | awk '{print $3}'" %}
+            * {{ pkg }}: {{ salt['cmd.run'](pkg_version) }}
+            {%- endfor %}
+        {%- if controller.backend is defined %}
+        backends:
+          value: |
+            {%- for backend_name, backend in controller.get('backend', {}).iteritems() %}
+            * {{ backend_name }}:
+              * storage engine: {{ backend.engine }}
+              * volume type: {{ backend.type_name }}
+            {%- endfor %}
+        {%- endif %}
+  {%- endif %}
+  {%- if pillar.cinder.volume is defined %}
+  {%- from "cinder/map.jinja" import volume with context %}
+    volume:
+      name: volume
+      param:
+        {%- if pillar.cinder.volume.version is defined %}
+        version:
+          name: "Version"
+          value: {{ volume.version }}
+        {%- endif %}
+        {%- if pillar.cinder.volume.database is defined %}
+        database_host:
+          name: "Database"
+          value: {{ volume.database.user }}@{{ volume.database.host }}:{{ volume.database.port }}//{{ volume.database.name }}
+        {%- endif %}
+        {%- if pillar.cinder.volume.message_queue is defined %}
+        message_queue_ip:
+          name: "Message queue"
+          value: {{ volume.message_queue.user }}@{{ volume.message_queue.host }}:{{ volume.message_queue.port }}{{ volume.message_queue.virtual_host }}
+        {%- endif %}
+        {%- if pillar.cinder.volume.identity is defined %}
+        identity_host:
+          name: "Identity service"
+          value: {{ volume.identity.user }}@{{ volume.identity.host }}:{{ volume.identity.port }}
+        {%- endif %}
+        packages:
+          value: |
+            {%- for pkg in volume.pkgs %}
+            {%- set pkg_version = "dpkg -l "+pkg+" | grep "+pkg+" | awk '{print $3}'" %}
+            * {{ pkg }}: {{ salt['cmd.run'](pkg_version) }}
+            {%- endfor %}
+        {%- if volume.backend is defined %}
+        backends:
+          value: |
+            {%- for backend_name, backend in volume.get('backend', {}).iteritems() %}
+            * {{ backend_name }}:
+              * storage engine: {{ backend.engine }}
+              * volume type: {{ backend.type_name }}
+            {%- endfor %}
+        {%- endif %}
+  {%- endif %}
diff --git a/cinder/user.sls b/cinder/user.sls
new file mode 100644
index 0000000..aabf39d
--- /dev/null
+++ b/cinder/user.sls
@@ -0,0 +1,25 @@
+{%- if not salt['user.info']('cinder') %}
+cinder_user:
+  user.present:
+    - name: cinder
+    - home: /var/lib/cinder
+    - uid: 304
+    - gid: 304
+    - shell: /bin/false
+    - system: True
+    - require_in:
+      {%- if pillar.cinder.controller is defined and pillar.cinder.controller.enabled %}
+      - pkg: cinder_controller_packages
+      {%- endif %}
+      {%- if pillar.cinder.volume is defined and pillar.cinder.volume.enabled %}
+      - pkg: cinder_volume_packages
+      {%- endif %}
+
+cinder_group:
+  group.present:
+    - name: cinder
+    - gid: 304
+    - system: True
+    - require_in:
+      - user: cinder_user
+{%- endif %}
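+
+# Pinning uid/gid 304 before package installation keeps cinder file ownership
+# consistent across nodes; the require_in edges order these states ahead of
+# the package states that would otherwise create the user with a dynamic id.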
diff --git a/cinder/volume.sls b/cinder/volume.sls
new file mode 100644
index 0000000..b4ab4ba
--- /dev/null
+++ b/cinder/volume.sls
@@ -0,0 +1,216 @@
+{%- from "cinder/map.jinja" import volume with context %}
+{%- if volume.enabled %}
+
+include:
+- cinder.user
+
+cinder_volume_packages:
+  pkg.installed:
+  - names: {{ volume.pkgs }}
+
+/var/lock/cinder:
+  file.directory:
+  - mode: 755
+  - user: cinder
+  - group: cinder
+  - require:
+    - pkg: cinder_volume_packages
+  - require_in:
+    - service: cinder_volume_services
+
+{%- if pillar.cinder.controller is not defined or not pillar.cinder.controller.enabled %}
+
+/etc/cinder/cinder.conf:
+  file.managed:
+  - source: salt://cinder/files/{{ volume.version }}/cinder.conf.volume.{{ grains.os_family }}
+  - template: jinja
+  - require:
+    - pkg: cinder_volume_packages
+
+/etc/cinder/api-paste.ini:
+  file.managed:
+  - source: salt://cinder/files/{{ volume.version }}/api-paste.ini.volume.{{ grains.os_family }}
+  - template: jinja
+  - require:
+    - pkg: cinder_volume_packages
+
+{%- endif %}
+
+cinder_volume_services:
+  service.running:
+  - names: {{ volume.services }}
+  - enable: true
+  - watch:
+    - file: /etc/cinder/cinder.conf
+    - file: /etc/cinder/api-paste.ini
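+# watch (rather than require) restarts the cinder-volume services whenever
+# either rendered file changes on subsequent highstate runs.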
+
+{# new way: backends defined as a map under cinder:volume:backend #}
+
+{%- if volume.backend is defined %}
+
+{%- for backend_name, backend in volume.get('backend', {}).iteritems() %}
+
+{%- if backend.engine == 'iscsi' %}
+
+cinder_iscsi_packages_{{ loop.index }}:
+  pkg.installed:
+  - names:
+    - iscsitarget
+    - open-iscsi
+    - iscsitarget-dkms
+  - require:
+    - pkg: cinder_volume_packages
+
+/etc/default/iscsitarget:
+  file.managed:
+  - source: salt://cinder/files/iscsitarget
+  - template: jinja
+  - require:
+    - pkg: cinder_iscsi_packages_{{ loop.index }}
+
+cinder_scsi_service:
+  service.running:
+  - names:
+    - iscsitarget
+    - open-iscsi
+  - enable: true
+  - watch:
+    - file: /etc/default/iscsitarget
+
+{%- endif %}
+
+{%- if backend.engine == 'hitachi_vsp' %}
+
+{%- if grains.os_family == 'Debian' and volume.version == 'juno' %}
+
+hitachi_pkgs:
+  pkg.latest:
+    - names:
+      - horcm
+      - hbsd
+
+cinder_hitachi_vps_dir:
+  file.directory:
+  - name: /var/lock/hbsd
+  - user: cinder
+  - group: cinder
+
+{%- endif %}
+
+{%- endif %}
+
+{%- if backend.engine == 'hp3par' %}
+
+hp3parclient:
+  pkg.latest:
+    - name: python-hp3parclient
+
+{%- endif %}
+
+{%- if backend.engine == 'fujitsu' %}
+
+cinder_driver_fujitsu_{{ loop.index }}:
+  pkg.latest:
+    - name: cinder-driver-fujitsu
+
+/etc/cinder/cinder_fujitsu_eternus_dx_{{ backend_name }}.xml:
+  file.managed:
+  - source: salt://cinder/files/{{ volume.version }}/cinder_fujitsu_eternus_dx.xml
+  - template: jinja
+  - defaults:
+      volume_type_name: "{{ backend.pool }}"
+  - require:
+    - pkg: cinder-driver-fujitsu
+
+{%- endif %}
+
+{%- endfor %}
+
+{%- endif %}
+
+{# old way: a single cinder:volume:storage engine with a list of types #}
+
+{%- if volume.storage is defined %}
+
+{%- if volume.storage.engine == 'iscsi' %}
+
+cinder_iscsi_packages:
+  pkg.installed:
+  - names:
+    - iscsitarget
+    - open-iscsi
+    - iscsitarget-dkms
+  - require:
+    - pkg: cinder_volume_packages
+
+/etc/default/iscsitarget:
+  file.managed:
+  - source: salt://cinder/files/iscsitarget
+  - template: jinja
+  - require:
+    - pkg: cinder_iscsi_packages
+
+cinder_scsi_service:
+  service.running:
+  - names:
+    - iscsitarget
+    - open-iscsi
+  - enable: true
+  - watch:
+    - file: /etc/default/iscsitarget
+
+{%- endif %}
+
+{%- if volume.storage.engine == 'hitachi_vsp' %}
+
+{%- if grains.os_family == 'Debian' and volume.version == 'juno' %}
+
+hitachi_pkgs:
+  pkg.latest:
+    - names:
+      - horcm
+      - hbsd
+
+cinder_hitachi_vps_dir:
+  file.directory:
+  - name: /var/lock/hbsd
+  - user: cinder
+  - group: cinder
+
+{%- endif %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == 'hp3par' %}
+
+hp3parclient:
+  pkg.latest:
+    - name: python-hp3parclient
+
+{%- endif %}
+
+{%- if volume.storage.engine == 'fujitsu' %}
+
+cinder_driver_fujitsu:
+  pkg.latest:
+    - name: cinder-driver-fujitsu
+
+{%- for type in volume.get('types', []) %}
+
+/etc/cinder/cinder_fujitsu_eternus_dx_{{ type.name }}.xml:
+  file.managed:
+  - source: salt://cinder/files/{{ volume.version }}/cinder_fujitsu_eternus_dx.xml
+  - template: jinja
+  - defaults:
+      volume_type_name: "{{ type.pool }}"
+  - require:
+    - pkg: cinder-driver-fujitsu
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- endif %}
+
+
+{%- endif %}
diff --git a/doc/source/conf.py b/doc/source/conf.py
new file mode 100644
index 0000000..508f3b6
--- /dev/null
+++ b/doc/source/conf.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+sys.path.insert(0, os.path.abspath('../..'))
+# -- General configuration ----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = [
+    'sphinx.ext.autodoc',
+]
+
+# autodoc generation is a bit aggressive and a nuisance when doing heavy
+# text edit cycles.
+# execute "export SPHINX_DEBUG=1" in your terminal to disable
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'salt-formula-cinder'
+copyright = u'2015, OpenStack Foundation'
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+add_module_names = True
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# -- Options for HTML output --------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+# html_theme_path = ["."]
+# html_theme = '_theme'
+# html_static_path = ['static']
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = '%sdoc' % project
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass
+# [howto/manual]).
+latex_documents = [
+    ('index',
+     '%s.tex' % project,
+     u'%s Documentation' % project,
+     u'OpenStack Foundation', 'manual'),
+]
+
+# Example configuration for intersphinx: refer to the Python standard library.
+# intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/doc/source/index.rst b/doc/source/index.rst
new file mode 100644
index 0000000..a6210d3
--- /dev/null
+++ b/doc/source/index.rst
@@ -0,0 +1 @@
+.. include:: ../../README.rst
diff --git a/metadata.yml b/metadata.yml
new file mode 100644
index 0000000..af2198a
--- /dev/null
+++ b/metadata.yml
@@ -0,0 +1,3 @@
+name: "cinder"
+version: "2016.4.1"
+source: "https://github.com/openstack/salt-formula-cinder"
diff --git a/metadata/service/control/cluster.yml b/metadata/service/control/cluster.yml
new file mode 100644
index 0000000..91a814a
--- /dev/null
+++ b/metadata/service/control/cluster.yml
@@ -0,0 +1,46 @@
+applications:
+- cinder
+classes:
+- service.cinder.support
+parameters:
+  cinder:
+    controller:
+      enabled: true
+      version: ${_param:cinder_version}
+      osapi:
+        host: ${_param:cluster_local_address}
+      database:
+        engine: mysql
+        host: ${_param:cluster_vip_address}
+        port: 3306
+        name: cinder
+        user: cinder
+        password: ${_param:mysql_cinder_password}
+      identity:
+        engine: keystone
+        region: RegionOne
+        host: ${_param:cluster_vip_address}
+        port: 35357
+        tenant: service
+        user: cinder
+        password: ${_param:keystone_cinder_password}
+      glance:
+        host: ${_param:cluster_vip_address}
+        port: 9292
+      message_queue:
+        engine: rabbitmq
+        host: ${_param:cluster_vip_address}
+        port: 5672
+        user: openstack
+        password: ${_param:rabbitmq_openstack_password}
+        virtual_host: '/openstack'
+        ha_queues: true
+      cache:
+        engine: memcached
+        members:
+        - host: ${_param:cluster_node01_address}
+          port: 11211
+        - host: ${_param:cluster_node02_address}
+          port: 11211
+        - host: ${_param:cluster_node03_address}
+          port: 11211
\ No newline at end of file
diff --git a/metadata/service/control/cluster_control.yml b/metadata/service/control/cluster_control.yml
new file mode 100644
index 0000000..91a814a
--- /dev/null
+++ b/metadata/service/control/cluster_control.yml
@@ -0,0 +1,46 @@
+applications:
+- cinder
+classes:
+- service.cinder.support
+parameters:
+  cinder:
+    controller:
+      enabled: true
+      version: ${_param:cinder_version}
+      osapi:
+        host: ${_param:cluster_local_address}
+      database:
+        engine: mysql
+        host: ${_param:cluster_vip_address}
+        port: 3306
+        name: cinder
+        user: cinder
+        password: ${_param:mysql_cinder_password}
+      identity:
+        engine: keystone
+        region: RegionOne
+        host: ${_param:cluster_vip_address}
+        port: 35357
+        tenant: service
+        user: cinder
+        password: ${_param:keystone_cinder_password}
+      glance:
+        host: ${_param:cluster_vip_address}
+        port: 9292
+      message_queue:
+        engine: rabbitmq
+        host: ${_param:cluster_vip_address}
+        port: 5672
+        user: openstack
+        password: ${_param:rabbitmq_openstack_password}
+        virtual_host: '/openstack'
+        ha_queues: true
+      cache:
+        engine: memcached
+        members:
+        - host: ${_param:cluster_node01_address}
+          port: 11211
+        - host: ${_param:cluster_node02_address}
+          port: 11211
+        - host: ${_param:cluster_node03_address}
+          port: 11211
\ No newline at end of file
diff --git a/metadata/service/control/single.yml b/metadata/service/control/single.yml
new file mode 100644
index 0000000..f52b8a6
--- /dev/null
+++ b/metadata/service/control/single.yml
@@ -0,0 +1,37 @@
+applications:
+- cinder
+classes:
+- service.cinder.support
+parameters:
+  cinder:
+    controller:
+      enabled: true
+      version: ${_param:cinder_version}
+      osapi:
+        host: ${_param:single_address}
+      database:
+        engine: mysql
+        host: localhost
+        port: 3306
+        name: cinder
+        user: cinder
+        password: ${_param:mysql_cinder_password}
+      identity:
+        engine: keystone
+        region: RegionOne
+        host: ${_param:single_address}
+        port: 35357
+        tenant: service
+        user: cinder
+        password: ${_param:keystone_cinder_password}
+      glance:
+        host: ${_param:single_address}
+        port: 9292
+      message_queue:
+        engine: rabbitmq
+        host: ${_param:single_address}
+        port: 5672
+        user: openstack
+        password: ${_param:rabbitmq_openstack_password}
+        virtual_host: '/openstack'
+        ha_queues: false
diff --git a/metadata/service/support.yml b/metadata/service/support.yml
new file mode 100644
index 0000000..bb1eadc
--- /dev/null
+++ b/metadata/service/support.yml
@@ -0,0 +1,11 @@
+parameters:
+  cinder:
+    _support:
+      collectd:
+        enabled: false
+      heka:
+        enabled: true
+      sensu:
+        enabled: true
+      sphinx:
+        enabled: true
diff --git a/metadata/service/volume/single.yml b/metadata/service/volume/single.yml
new file mode 100644
index 0000000..1705867
--- /dev/null
+++ b/metadata/service/volume/single.yml
@@ -0,0 +1,46 @@
+applications:
+- cinder
+classes:
+- service.cinder.support
+parameters:
+  cinder:
+    volume:
+      enabled: true
+      version: ${_param:cinder_version}
+      osapi:
+        host: ${_param:cluster_local_address}
+      database:
+        engine: mysql
+        host: ${_param:cluster_vip_address}
+        port: 3306
+        name: cinder
+        user: cinder
+        password: ${_param:mysql_cinder_password}
+      identity:
+        engine: keystone
+        region: RegionOne
+        host: ${_param:cluster_vip_address}
+        port: 35357
+        tenant: service
+        user: cinder
+        password: ${_param:keystone_cinder_password}
+      glance:
+        host: ${_param:cluster_vip_address}
+        port: 9292
+      message_queue:
+        engine: rabbitmq
+        host: ${_param:cluster_vip_address}
+        port: 5672
+        user: openstack
+        password: ${_param:rabbitmq_openstack_password}
+        virtual_host: '/openstack'
+        ha_queues: true
+      cache:
+        engine: memcached
+        members:
+        - host: ${_param:cluster_node01_address}
+          port: 11211
+        - host: ${_param:cluster_node02_address}
+          port: 11211
+        - host: ${_param:cluster_node03_address}
+          port: 11211
\ No newline at end of file
diff --git a/other-requirements.txt b/other-requirements.txt
new file mode 100644
index 0000000..ba84cc5
--- /dev/null
+++ b/other-requirements.txt
@@ -0,0 +1 @@
+python-yaml
diff --git a/tests/pillar/ceph_single.sls b/tests/pillar/ceph_single.sls
new file mode 100644
index 0000000..de5f2cc
--- /dev/null
+++ b/tests/pillar/ceph_single.sls
@@ -0,0 +1,25 @@
+cinder:
+  controller:
+    enabled: true
+    version: liberty
+    backend:
+      ceph_backend:
+        type_name: standard-iops
+        backend: ceph_backend
+        pool: volumes
+        engine: ceph
+        user: cinder
+        secret_uuid: password
+        client_cinder_key: password
+  volume:
+    enabled: true
+    version: liberty
+    backend:
+      ceph_backend:
+        type_name: standard-iops
+        backend: ceph_backend
+        pool: volumes
+        engine: ceph
+        user: cinder
+        secret_uuid: password
+        client_cinder_key: password
\ No newline at end of file
diff --git a/tests/pillar/control_cluster.sls b/tests/pillar/control_cluster.sls
new file mode 100644
index 0000000..b8c1dfb
--- /dev/null
+++ b/tests/pillar/control_cluster.sls
@@ -0,0 +1,41 @@
+cinder:
+  controller:
+    enabled: true
+    version: liberty
+    osapi:
+      host: 127.0.0.1
+    database:
+      engine: mysql
+      host: 127.0.0.1
+      port: 3306
+      name: cinder
+      user: cinder
+      password: password
+    identity:
+      engine: keystone
+      region: RegionOne
+      host: 127.0.0.1
+      port: 35357
+      tenant: service
+      user: cinder
+      password: password
+    glance:
+      host: 127.0.0.1
+      port: 9292
+    message_queue:
+      engine: rabbitmq
+      host: 127.0.0.1
+      port: 5672
+      user: openstack
+      password: password
+      virtual_host: '/openstack'
+      ha_queues: true
+    cache:
+      engine: memcached
+      members:
+      - host: 127.0.0.1
+        port: 11211
+      - host: 127.0.0.1
+        port: 11211
+      - host: 127.0.0.1
+        port: 11211
\ No newline at end of file
diff --git a/tests/pillar/control_single.sls b/tests/pillar/control_single.sls
new file mode 100644
index 0000000..d9d6a73
--- /dev/null
+++ b/tests/pillar/control_single.sls
@@ -0,0 +1,32 @@
+cinder:
+  controller:
+    enabled: true
+    version: liberty
+    osapi:
+      host: 127.0.0.1
+    database:
+      engine: mysql
+      host: localhost
+      port: 3306
+      name: cinder
+      user: cinder
+      password: password
+    identity:
+      engine: keystone
+      host: 127.0.0.1
+      region: RegionOne
+      port: 35357
+      tenant: service
+      user: cinder
+      password: password
+    glance:
+      host: 127.0.0.1
+      port: 9292
+    message_queue:
+      engine: rabbitmq
+      host: 127.0.0.1
+      port: 5672
+      user: openstack
+      password: password
+      virtual_host: '/openstack'
+      ha_queues: false
diff --git a/tests/pillar/eternus_single.sls b/tests/pillar/eternus_single.sls
new file mode 100644
index 0000000..52b722c
--- /dev/null
+++ b/tests/pillar/eternus_single.sls
@@ -0,0 +1,49 @@
+cinder:
+  volume:
+    enabled: true
+    version: liberty
+    backend:
+      10kThinPro:
+        type_name: 10kThinPro
+        engine: fujitsu
+        pool: 10kThinPro
+        host: 127.0.0.1
+        port: 5988
+        user: username
+        password: password
+        connection: FC
+        name: 10kThinPro
+      10k_SAS:
+        type_name: 10k_SAS
+        pool: SAS10K
+        engine: fujitsu
+        host: 127.0.0.1
+        port: 5988
+        user: username
+        password: password
+        connection: FC
+        name: 7k2RAID6
+  controller:
+    enabled: true
+    version: liberty
+    backend:
+      10kThinPro:
+        type_name: 10kThinPro
+        engine: fujitsu
+        pool: 10kThinPro
+        host: 127.0.0.1
+        port: 5988
+        user: username
+        password: password
+        connection: FC
+        name: 10kThinPro
+      10k_SAS:
+        type_name: 10k_SAS
+        pool: SAS10K
+        engine: fujitsu
+        host: 127.0.0.1
+        port: 5988
+        user: username
+        password: password
+        connection: FC
+        name: 7k2RAID6
\ No newline at end of file
diff --git a/tests/pillar/gpfs_single.sls b/tests/pillar/gpfs_single.sls
new file mode 100644
index 0000000..4bb04d7
--- /dev/null
+++ b/tests/pillar/gpfs_single.sls
@@ -0,0 +1,25 @@
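+# IBM GPFS scenario: gold and silver backends served from local GPFS mount points.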
+cinder:
+  volume:
+    enabled: true
+    version: liberty
+    backend:
+      GPFS-GOLD:
+        type_name: GPFS-GOLD
+        engine: gpfs
+        mount_point: '/mnt/gpfs-openstack/cinder/gold'
+      GPFS-SILVER:
+        type_name: GPFS-SILVER
+        engine: gpfs
+        mount_point: '/mnt/gpfs-openstack/cinder/silver'
+  controller:
+    enabled: true
+    version: liberty
+    backend:
+      GPFS-GOLD:
+        type_name: GPFS-GOLD
+        engine: gpfs
+        mount_point: '/mnt/gpfs-openstack/cinder/gold'
+      GPFS-SILVER:
+        type_name: GPFS-SILVER
+        engine: gpfs
+        mount_point: '/mnt/gpfs-openstack/cinder/silver'
\ No newline at end of file
diff --git a/tests/pillar/hp3par_single.sls b/tests/pillar/hp3par_single.sls
new file mode 100644
index 0000000..cb93ba1
--- /dev/null
+++ b/tests/pillar/hp3par_single.sls
@@ -0,0 +1,34 @@
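+# HP 3PAR scenario: one backend with separate volume (cpg) and snapshot (snapcpg) CPGs.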
+cinder:
+  controller:
+    enabled: true
+    version: liberty
+    backend:
+      hp3par_backend:
+        type_name: hp3par
+        backend: hp3par_backend
+        user: admin
+        password: password
+        url: http://localhost/api/v1
+        cpg: OpenStackCPG
+        host: localhost
+        login: admin
+        sanpassword: password
+        debug: true
+        snapcpg: OpenStackSNAPCPG
+  volume:
+    enabled: true
+    version: liberty
+    backend:
+      hp3par_backend:
+        type_name: hp3par
+        backend: hp3par_backend
+        user: admin
+        password: password
+        url: http://localhost/api/v1
+        cpg: OpenStackCPG
+        host: localhost
+        login: admin
+        sanpassword: password
+        debug: true
+        snapcpg: OpenStackSNAPCPG
+        engine: hp3par
\ No newline at end of file
diff --git a/tests/pillar/lefthand_single.sls b/tests/pillar/lefthand_single.sls
new file mode 100644
index 0000000..9a48eec
--- /dev/null
+++ b/tests/pillar/lefthand_single.sls
@@ -0,0 +1,25 @@
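+# HP LeftHand scenario: a single cluster reached over HTTPS with iSCSI CHAP disabled.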
+cinder:
+  volume:
+    enabled: true
+    version: liberty
+    backend:
+      HP-LeftHand:
+        type_name: normal-storage
+        engine: hp_lefthand
+        api_url: 'https://127.0.0.1:8081/lhos'
+        username: username
+        password: password
+        clustername: cluster1
+        iscsi_chap_enabled: false
+  controller:
+    enabled: true
+    version: liberty
+    backend:
+      HP-LeftHand:
+        type_name: normal-storage
+        engine: hp_lefthand
+        api_url: 'https://127.0.0.1:8081/lhos'
+        username: username
+        password: password
+        clustername: cluster1
+        iscsi_chap_enabled: false
\ No newline at end of file
diff --git a/tests/pillar/solidfire_single.sls b/tests/pillar/solidfire_single.sls
new file mode 100644
index 0000000..a75d6f4
--- /dev/null
+++ b/tests/pillar/solidfire_single.sls
@@ -0,0 +1,25 @@
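+# SolidFire scenario: SAN credentials with 512-byte sector emulation disabled.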
+cinder:
+  volume:
+    enabled: true
+    version: liberty
+    backend:
+      solidfire:
+        type_name: normal-storage
+        engine: solidfire
+        san_ip: 127.0.0.1
+        san_login: username
+        san_password: password
+        clustername: cluster1
+        sf_emulate_512: false
+  controller:
+    enabled: true
+    version: liberty
+    backend:
+      solidfire:
+        type_name: normal-storage
+        engine: solidfire
+        san_ip: 127.0.0.1
+        san_login: username
+        san_password: password
+        clustername: cluster1
+        sf_emulate_512: false
\ No newline at end of file
diff --git a/tests/pillar/storwize_single.sls b/tests/pillar/storwize_single.sls
new file mode 100644
index 0000000..96c6b6f
--- /dev/null
+++ b/tests/pillar/storwize_single.sls
@@ -0,0 +1,75 @@
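+# IBM Storwize scenario: three FC SAS pools (7k2/10k/15k) with multihost and multipath enabled.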
+cinder:
+  volume:
+    enabled: true
+    version: liberty
+    backend:
+      7k2_SAS:
+        engine: storwize
+        type_name: 7k2_SAS
+        host: 127.0.0.1
+        port: 22
+        user: username
+        password: password
+        connection: FC
+        multihost: true
+        multipath: true
+        pool: SAS7K2
+      10k_SAS:
+        engine: storwize
+        type_name: 10k_SAS
+        host: 127.0.0.1
+        port: 22
+        user: username
+        password: password
+        connection: FC
+        multihost: true
+        multipath: true
+        pool: SAS10K
+      15k_SAS:
+        engine: storwize
+        type_name: 15k_SAS
+        host: 127.0.0.1
+        port: 22
+        user: username
+        password: password
+        connection: FC
+        multihost: true
+        multipath: true
+        pool: SAS15K
+  controller:
+    enabled: true
+    version: liberty
+    backend:
+      7k2_SAS:
+        engine: storwize
+        type_name: 7k2_SAS
+        host: 127.0.0.1
+        port: 22
+        user: username
+        password: password
+        connection: FC
+        multihost: true
+        multipath: true
+        pool: SAS7K2
+      10k_SAS:
+        engine: storwize
+        type_name: 10k_SAS
+        host: 127.0.0.1
+        port: 22
+        user: username
+        password: password
+        connection: FC
+        multihost: true
+        multipath: true
+        pool: SAS10K
+      15k_SAS:
+        engine: storwize
+        type_name: 15k_SAS
+        host: 127.0.0.1
+        port: 22
+        user: username
+        password: password
+        connection: FC
+        multihost: true
+        multipath: true
+        pool: SAS15K
\ No newline at end of file
diff --git a/tests/pillar/volume_single.sls b/tests/pillar/volume_single.sls
new file mode 100644
index 0000000..2c69443
--- /dev/null
+++ b/tests/pillar/volume_single.sls
@@ -0,0 +1,31 @@
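+# Standalone cinder-volume scenario with its own database, identity, glance and message queue settings.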
+cinder:
+  volume:
+    enabled: true
+    version: liberty
+    osapi:
+      host: 127.0.0.1
+    database:
+      engine: mysql
+      host: 127.0.0.1
+      port: 3306
+      name: cinder
+      user: cinder
+      password: password
+    identity:
+      engine: keystone
+      host: 127.0.0.1
+      port: 35357
+      tenant: service
+      user: cinder
+      password: password
+    glance:
+      host: 127.0.0.1
+      port: 9292
+    message_queue:
+      engine: rabbitmq
+      host: 127.0.0.1
+      port: 5672
+      user: openstack
+      password: password
+      virtual_host: '/openstack'
+      ha_queues: true
\ No newline at end of file
diff --git a/tests/pillar/vsp_single.sls b/tests/pillar/vsp_single.sls
new file mode 100644
index 0000000..13450b4
--- /dev/null
+++ b/tests/pillar/vsp_single.sls
@@ -0,0 +1,19 @@
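+# Hitachi VSP scenario: a single FC-attached backend on both roles.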
+cinder:
+  controller:
+    enabled: true
+    version: liberty
+    backend:
+      hus100_backend:
+        type_name: HUS100
+        backend: hus100_backend
+        engine: hitachi_vsp
+        connection: FC
+  volume:
+    enabled: true
+    version: liberty
+    backend:
+      hus100_backend:
+        type_name: HUS100
+        backend: hus100_backend
+        engine: hitachi_vsp
+        connection: FC
\ No newline at end of file
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
new file mode 100755
index 0000000..3f42101
--- /dev/null
+++ b/tests/run_tests.sh
@@ -0,0 +1,162 @@
+#!/usr/bin/env bash
+
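+# Test harness: renders this formula against every pillar fixture under
+# tests/pillar/ using a masterless (--local) salt-call, installing Salt into
+# a virtualenv when no salt-call is already on the PATH.
+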
+set -e
+[ -n "$DEBUG" ] && set -x
+
+CURDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+METADATA=${CURDIR}/../metadata.yml
+FORMULA_NAME=$(cat $METADATA | python -c "import sys,yaml; print yaml.load(sys.stdin)['name']")
+
+## Overrideable parameters
+PILLARDIR=${PILLARDIR:-${CURDIR}/pillar}
+BUILDDIR=${BUILDDIR:-${CURDIR}/build}
+VENV_DIR=${VENV_DIR:-${BUILDDIR}/virtualenv}
+DEPSDIR=${BUILDDIR}/deps
+
+SALT_FILE_DIR=${SALT_FILE_DIR:-${BUILDDIR}/file_root}
+SALT_PILLAR_DIR=${SALT_PILLAR_DIR:-${BUILDDIR}/pillar_root}
+SALT_CONFIG_DIR=${SALT_CONFIG_DIR:-${BUILDDIR}/salt}
+SALT_CACHE_DIR=${SALT_CACHE_DIR:-${SALT_CONFIG_DIR}/cache}
+
+SALT_OPTS="${SALT_OPTS} --retcode-passthrough --local -c ${SALT_CONFIG_DIR}"
+
+if [ "x${SALT_VERSION}" != "x" ]; then
+    PIP_SALT_VERSION="==${SALT_VERSION}"
+fi
+
+## Functions
+log_info() {
+    echo "[INFO] $*"
+}
+
+log_err() {
+    echo "[ERROR] $*" >&2
+}
+
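+# Create a virtualenv and install Salt into it, pinned via SALT_VERSION when set.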
+setup_virtualenv() {
+    log_info "Setting up Python virtualenv"
+    virtualenv $VENV_DIR
+    source ${VENV_DIR}/bin/activate
+    pip install salt${PIP_SALT_VERSION}
+}
+
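+# Build a pillar top file that maps each fixture onto a minion id of the same name.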
+setup_pillar() {
+    [ ! -d ${SALT_PILLAR_DIR} ] && mkdir -p ${SALT_PILLAR_DIR}
+    echo "base:" > ${SALT_PILLAR_DIR}/top.sls
+    for pillar in ${PILLARDIR}/*.sls; do
+        state_name=$(basename ${pillar%.sls})
+        echo -e "  ${state_name}:\n    - ${state_name}" >> ${SALT_PILLAR_DIR}/top.sls
+    done
+}
+
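+# Build the state top file and a minion config pointing the file and pillar
+# roots at the test directories.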
+setup_salt() {
+    [ ! -d ${SALT_FILE_DIR} ] && mkdir -p ${SALT_FILE_DIR}
+    [ ! -d ${SALT_CONFIG_DIR} ] && mkdir -p ${SALT_CONFIG_DIR}
+    [ ! -d ${SALT_CACHE_DIR} ] && mkdir -p ${SALT_CACHE_DIR}
+
+    echo "base:" > ${SALT_FILE_DIR}/top.sls
+    for pillar in ${PILLARDIR}/*.sls; do
+        state_name=$(basename ${pillar%.sls})
+        echo -e "  ${state_name}:\n    - ${FORMULA_NAME}" >> ${SALT_FILE_DIR}/top.sls
+    done
+
+    cat << EOF > ${SALT_CONFIG_DIR}/minion
+file_client: local
+cachedir: ${SALT_CACHE_DIR}
+verify_env: False
+
+file_roots:
+  base:
+  - ${SALT_FILE_DIR}
+  - ${CURDIR}/..
+  - /usr/share/salt-formulas/env
+
+pillar_roots:
+  base:
+  - ${SALT_PILLAR_DIR}
+  - ${PILLARDIR}
+EOF
+}
+
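+# Fetch one "name:git-url" dependency unless it is already available, then
+# resolve that dependency's own dependencies recursively.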
+fetch_dependency() {
+    dep_name="$(echo $1|cut -d : -f 1)"
+    dep_source="$(echo $1|cut -d : -f 2-)"
+    dep_root="${DEPSDIR}/$(basename $dep_source .git)"
+    dep_metadata="${dep_root}/metadata.yml"
+
+    [ -d /usr/share/salt-formulas/env/${dep_name} ] && log_info "Dependency $dep_name already present in system-wide salt env" && return 0
+    [ -d $dep_root ] && log_info "Dependency $dep_name already fetched" && return 0
+
+    log_info "Fetching dependency $dep_name"
+    [ ! -d ${DEPSDIR} ] && mkdir -p ${DEPSDIR}
+    git clone $dep_source ${DEPSDIR}/$(basename $dep_source .git)
+    ln -s ${dep_root}/${dep_name} ${SALT_FILE_DIR}/${dep_name}
+
+    METADATA="${dep_metadata}" install_dependencies
+}
+
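+# Read the dependencies list from metadata.yml and fetch each entry.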
+install_dependencies() {
+    grep -E "^dependencies:" ${METADATA} >/dev/null || return 0
+    (python - | while read dep; do fetch_dependency "$dep"; done) << EOF
+import sys,yaml
+for dep in yaml.load(open('${METADATA}', 'r'))['dependencies']:
+    print '%s:%s' % (dep["name"], dep["source"])
+EOF
+}
+
+clean() {
+    log_info "Cleaning up ${BUILDDIR}"
+    [ -d ${BUILDDIR} ] && rm -rf ${BUILDDIR} || exit 0
+}
+
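+# Run salt-call inside the virtualenv when one has been set up.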
+salt_run() {
+    [ -e ${VENV_DIR}/bin/activate ] && source ${VENV_DIR}/bin/activate
+    salt-call ${SALT_OPTS} "$@"
+}
+
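+# Assemble the complete test environment: Salt itself, top files and formula dependencies.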
+prepare() {
+    [ ! -d ${BUILDDIR} ] && mkdir -p ${BUILDDIR}
+
+    which salt-call || setup_virtualenv
+    setup_pillar
+    setup_salt
+    install_dependencies
+}
+
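+# Render state.show_sls once per pillar fixture and abort on the first failure.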
+run() {
+    for pillar in ${PILLARDIR}/*.sls; do
+        state_name=$(basename ${pillar%.sls})
+        salt_run --id=${state_name} state.show_sls ${FORMULA_NAME} || (log_err "Execution of ${FORMULA_NAME}.${state_name} failed"; exit 1)
+    done
+}
+
+_atexit() {
+    RETVAL=$?
+    trap true INT TERM EXIT
+
+    if [ $RETVAL -ne 0 ]; then
+        log_err "Execution failed"
+    else
+        log_info "Execution successful"
+    fi
+    return $RETVAL
+}
+
+## Main
+trap _atexit INT TERM EXIT
+
+case $1 in
+    clean)
+        clean
+        ;;
+    prepare)
+        prepare
+        ;;
+    run)
+        run
+        ;;
+    *)
+        prepare
+        run
+        ;;
+esac