Initial commit
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
new file mode 100644
index 0000000..15745bc
--- /dev/null
+++ b/CHANGELOG.rst
@@ -0,0 +1,6 @@
+cinder formula
+==============
+
+0.0.1 (2015-08-03)
+------------------
+
+- Initial formula setup
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..8e80b12
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,13 @@
+Copyright (c) 2014-2015 tcp cloud a. s.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..0a48ec7
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,240 @@
+==============================
+OpenStack Cinder Block Storage
+==============================
+
+Cinder provides an infrastructure for managing volumes in OpenStack. It was originally a Nova component called nova-volume and has been an independent project since the Folsom release.
+
+Sample pillars
+==============
+
+The new pillar structure assigns cinder-api and cinder-scheduler to the controller role and cinder-volume to the volume role.
+
+.. code-block:: yaml
+
+ cinder:
+ controller:
+ enabled: true
+ version: juno
+ database:
+ engine: mysql
+ host: 127.0.0.1
+ port: 3306
+ name: cinder
+ user: cinder
+ password: pwd
+ identity:
+ engine: keystone
+ host: 127.0.0.1
+ port: 35357
+ tenant: service
+ user: cinder
+ password: pwd
+ message_queue:
+ engine: rabbitmq
+ host: 127.0.0.1
+ port: 5672
+ user: openstack
+ password: pwd
+ virtual_host: '/openstack'
+ storage:
+ engine: file
+ types:
+ - name: 7k2_SAS
+ - name: 10k_SAS
+ - name: 15k_SAS
+
+ cinder:
+ volume:
+ enabled: true
+ version: juno
+ database:
+ engine: mysql
+ host: 127.0.0.1
+ port: 3306
+ name: cinder
+ user: cinder
+ password: pwd
+ identity:
+ engine: keystone
+ host: 127.0.0.1
+ port: 35357
+ tenant: service
+ user: cinder
+ password: pwd
+ message_queue:
+ engine: rabbitmq
+ host: 127.0.0.1
+ port: 5672
+ user: openstack
+ password: pwd
+ virtual_host: '/openstack'
+ storage:
+ engine: file
+ types:
+ - name: 7k2_SAS
+ - name: 10k_SAS
+ - name: 15k_SAS
+
+Cinder setup with zeroing of deleted volumes
+
+.. code-block:: yaml
+
+ cinder:
+ controller:
+ enabled: true
+ wipe_method: zero
+ ...
+
+Cinder setup with shredding of deleted volumes
+
+.. code-block:: yaml
+
+ cinder:
+ controller:
+ enabled: true
+ wipe_method: shred
+ ...
+
+
+Default Cinder setup with iSCSI target
+
+.. code-block:: yaml
+
+ cinder:
+ controller:
+ enabled: true
+ version: juno
+ database:
+ engine: mysql
+ host: 127.0.0.1
+ port: 3306
+ name: cinder
+ user: cinder
+ password: pwd
+ identity:
+ engine: keystone
+ host: 127.0.0.1
+ port: 35357
+ tenant: service
+ user: cinder
+ password: pwd
+ message_queue:
+ engine: rabbitmq
+ host: 127.0.0.1
+ port: 5672
+ user: openstack
+ password: pwd
+ virtual_host: '/openstack'
+ storage:
+ engine: file
+ types:
+ - name: 7k2_SAS
+ - name: 10k_SAS
+ - name: 15k_SAS
+
+Cinder setup with IBM Storwize
+
+.. code-block:: yaml
+
+ cinder:
+ controller:
+ enabled: true
+ types:
+ - name: 7k2_SAS
+ engine: storwize
+ pool: SAS7K2
+ - name: 10k_SAS
+ pool: SAS10K
+ engine: storwize
+ - name: 15k_SAS
+ pool: SAS15K
+ engine: storwize
+ storage:
+ engine: storwize
+ host: 192.168.0.1
+ port: 22
+ user: username
+ password: pass
+ connection: FC/iSCSI
+ multihost: true
+ multipath: true
+
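+Note: ``connection`` selects the storage protocol and is one of ``FC`` or ``iSCSI``; the sample value only lists the available options.
+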
+Cinder setup with Hitachi VSP
+
+.. code-block:: yaml
+
+ cinder:
+ controller:
+ enabled: true
+ types:
+ - name: HUS100
+ backend: hus100_backend
+ storage:
+ engine: hitachi_vsp
+ connection: FC
+
+Cinder setup with Ceph
+
+.. code-block:: yaml
+
+ cinder:
+ controller:
+ enabled: true
+ types:
+ - name: ceph
+ backend: ceph_backend
+ pool: volumes
+ storage:
+ engine: ceph
+ user: cinder
+ secret_uuid: da74ccb7-aa59-1721-a172-0006b1aa4e3e
+ client_cinder_key: AQDOavlU6BsSJhAAnpFR906mvdgdfRqLHwu0Uw==
+
+http://ceph.com/docs/master/rbd/rbd-openstack/
+
+
+Cinder setup with HP 3PAR
+
+.. code-block:: yaml
+
+ cinder:
+ controller:
+ enabled: true
+ types:
+ - name: hp3par
+ backend: hp3par_backend
+ storage:
+ user: hp3paruser
+ password: something
+ url: http://10.10.10.10/api/v1
+ cpg: OpenStackCPG
+ host: 10.10.10.10
+ login: hp3paradmin
+ sanpassword: something
+ debug: True
+ snapcpg: OpenStackSNAPCPG
+
+Cinder setup with Fujitsu Eternus
+
+.. code-block:: yaml
+
+ cinder:
+ volume:
+ enabled: true
+ types:
+ - name: 10kThinPro
+ engine: fujitsu
+ pool: 10kThinPro
+ - name: 10k_SAS
+ pool: SAS10K
+ engine: fujitsu
+ storage:
+ engine: fujitsu
+ host: 192.168.0.1
+ port: 5988
+ user: username
+ password: pass
+ connection: FC/iSCSI
+
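+Example usage
+=============
+
+A minimal top file sketch for assigning the formula's states to nodes (the file location and targeting patterns are illustrative assumptions, adjust to your environment):
+
+.. code-block:: yaml
+
+    # top.sls
+    base:
+      'ctl*':
+        - cinder.controller
+      'vol*':
+        - cinder.volume
+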
+Read more
+=========
+
+* https://wiki.openstack.org/wiki/Cinder
+* http://docs.openstack.org/juno/config-reference/content/hitachi-configuration.html
diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000..3b04cfb
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+0.2
diff --git a/cinder/controller.sls b/cinder/controller.sls
new file mode 100644
index 0000000..a30b7ca
--- /dev/null
+++ b/cinder/controller.sls
@@ -0,0 +1,57 @@
+{%- from "cinder/map.jinja" import controller with context %}
+{%- if controller.enabled %}
+
+include:
+- cinder.user
+
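+# Install the Cinder controller packages (the list is defined in cinder/map.jinja)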
+cinder_controller_packages:
+ pkg.installed:
+ - names: {{ controller.pkgs }}
+
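+# Render the version- and OS-specific configuration templates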
+/etc/cinder/cinder.conf:
+ file.managed:
+ - source: salt://cinder/files/{{ controller.version }}/cinder.conf.controller.{{ grains.os_family }}
+ - template: jinja
+ - require:
+ - pkg: cinder_controller_packages
+
+/etc/cinder/api-paste.ini:
+ file.managed:
+ - source: salt://cinder/files/{{ controller.version }}/api-paste.ini.controller.{{ grains.os_family }}
+ - template: jinja
+ - require:
+ - pkg: cinder_controller_packages
+
+cinder_controller_services:
+ service.running:
+ - names: {{ controller.services }}
+ - enable: true
+ - watch:
+ - file: /etc/cinder/cinder.conf
+ - file: /etc/cinder/api-paste.ini
+
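+# Apply Cinder database schema migrations (cmd.run executes on every state run)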
+cinder_syncdb:
+ cmd.run:
+ - name: cinder-manage db sync
+ - require:
+ - service: cinder_controller_services
+
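+# Register each configured volume type and bind it to its backend via volume_backend_name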
+{% for type in controller.get('types', []) %}
+
+cinder_type_create_{{ type.name }}:
+ cmd.run:
+ - name: "source /root/keystonerc; cinder type-create {{ type.name }}"
+ - unless: "source /root/keystonerc; cinder type-list | grep {{ type.name }}"
+ - require:
+ - service: cinder_controller_services
+
+cinder_type_update_{{ type.name }}:
+ cmd.run:
+ - name: "source /root/keystonerc; cinder type-key {{ type.name }} set volume_backend_name={{ type.get('backend', type.name) }}"
+ - unless: "source /root/keystonerc; cinder extra-specs-list | grep \"{u'volume_backend_name': u'{{ type.get('backend', type.name) }}'}\""
+ - require:
+ - cmd: cinder_type_create_{{ type.name }}
+
+{% endfor %}
+
+{%- endif %}
diff --git a/cinder/files/iscsitarget b/cinder/files/iscsitarget
new file mode 100644
index 0000000..c69e4e5
--- /dev/null
+++ b/cinder/files/iscsitarget
@@ -0,0 +1 @@
+ISCSITARGET_ENABLE=true
\ No newline at end of file
diff --git a/cinder/files/juno/api-paste.ini.controller.Debian b/cinder/files/juno/api-paste.ini.controller.Debian
new file mode 100644
index 0000000..35693bd
--- /dev/null
+++ b/cinder/files/juno/api-paste.ini.controller.Debian
@@ -0,0 +1,74 @@
+#############
+# OpenStack #
+#############
+
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+
+[filter:request_id]
+paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+service_port=5000
+service_host={{ pillar.cinder.controller.identity.host }}
+service_protocol=http
+auth_host = {{ pillar.cinder.controller.identity.host }}
+auth_port = {{ pillar.cinder.controller.identity.port }}
+auth_protocol = http
+admin_tenant_name = {{ pillar.cinder.controller.identity.tenant }}
+admin_user = {{ pillar.cinder.controller.identity.user }}
+admin_password = {{ pillar.cinder.controller.identity.password }}
+auth_uri=http://{{ pillar.cinder.controller.identity.host }}:5000/
+
+[filter:ratelimit]
+limits =(POST, "*/servers", ^/servers, 100000, DAY);(PUT, "*", .*, 100000, MINUTE);(GET, "*changes-since*", .*changes-since.*, 100000, MINUTE);(DELETE, "*", .*, 100000, MINUTE);(POST, "*", .*, 100000, MINUTE)
+paste.filter_factory=cinder.api.v1.limits:RateLimitingMiddleware.factory
diff --git a/cinder/files/juno/api-paste.ini.volume.Debian b/cinder/files/juno/api-paste.ini.volume.Debian
new file mode 100644
index 0000000..83136cc
--- /dev/null
+++ b/cinder/files/juno/api-paste.ini.volume.Debian
@@ -0,0 +1,74 @@
+#############
+# OpenStack #
+#############
+
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+
+[filter:request_id]
+paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+service_port=5000
+service_host={{ pillar.cinder.volume.identity.host }}
+service_protocol=http
+auth_host = {{ pillar.cinder.volume.identity.host }}
+auth_port = {{ pillar.cinder.volume.identity.port }}
+auth_protocol = http
+admin_tenant_name = {{ pillar.cinder.volume.identity.tenant }}
+admin_user = {{ pillar.cinder.volume.identity.user }}
+admin_password = {{ pillar.cinder.volume.identity.password }}
+auth_uri=http://{{ pillar.cinder.volume.identity.host }}:5000/
+
+[filter:ratelimit]
+limits =(POST, "*/servers", ^/servers, 100000, DAY);(PUT, "*", .*, 100000, MINUTE);(GET, "*changes-since*", .*changes-since.*, 100000, MINUTE);(DELETE, "*", .*, 100000, MINUTE);(POST, "*", .*, 100000, MINUTE)
+paste.filter_factory=cinder.api.v1.limits:RateLimitingMiddleware.factory
diff --git a/cinder/files/juno/cinder.conf.controller.Debian b/cinder/files/juno/cinder.conf.controller.Debian
new file mode 100644
index 0000000..c0ed882
--- /dev/null
+++ b/cinder/files/juno/cinder.conf.controller.Debian
@@ -0,0 +1,333 @@
+{%- from "cinder/map.jinja" import controller with context %}
+
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+#volume_group = cinder
+
+verbose = True
+
+auth_strategy = keystone
+
+state_path = /var/lib/cinder
+lock_path = /var/lock/cinder
+
+use_syslog=False
+
+glance_num_retries=0
+debug=False
+
+#glance_api_ssl_compression=False
+#glance_api_insecure=False
+
+osapi_volume_listen={{ controller.osapi.host }}
+
+glance_host={{ controller.glance.host }}
+glance_port={{ controller.glance.port }}
+glance_api_version=2
+
+volume_backend_name=DEFAULT
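+# Pick the first volume type defined in the pillar as the default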
+default_volume_type={% for type in controller.get('types', []) %}{% if loop.first %}{{ type.name }}{% endif %}{% endfor %}
+
+#RPC response timeout recommended by Hitachi
+rpc_response_timeout=3600
+
+#Rabbit
+kombu_reconnect_delay=5.0
+rpc_backend=cinder.openstack.common.rpc.impl_kombu
+control_exchange=cinder
+rabbit_host = {{ controller.message_queue.host }}
+rabbit_port = {{ controller.message_queue.port }}
+rabbit_userid = {{ controller.message_queue.user }}
+rabbit_password = {{ controller.message_queue.password }}
+rabbit_virtual_host = {{ controller.message_queue.virtual_host }}
+rabbit_ha_queues={{ controller.message_queue.ha_queues }}
+
+
+volume_clear={{ controller.wipe_method }}
+
+{%- if controller.notification %}
+control_exchange = cinder
+notification_driver = messagingv2
+{%- endif %}
+
+
+volume_name_template = volume-%s
+
+#volume_group = vg_cinder_volume
+
+volumes_dir = /var/lib/cinder/volumes
+log_dir=/var/log/cinder
+
+# Use syslog for logging. (boolean value)
+#use_syslog=false
+
+use_syslog=false
+verbose=True
+lock_path=/var/lock/cinder
+
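+# One backend section per configured volume type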
+enabled_backends={% for type in controller.get('types', []) %}{{ type.name }}{% if not loop.last %},{% endif %}{% endfor %}
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-cinder
+admin_password={{ controller.identity.password }}
+auth_port={{ controller.identity.port }}
+auth_host={{ controller.identity.host }}
+admin_tenant_name={{ controller.identity.tenant }}
+auth_protocol=http
+admin_user=cinder
+signing_dirname=/tmp/keystone-signing-cinder
+auth_uri=http://{{ controller.identity.host }}:{{ controller.identity.port }}
+
+[database]
+idle_timeout=3600
+max_pool_size=30
+max_retries=-1
+max_overflow=40
+connection = {{ controller.database.engine }}://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}
+
+{%- if controller.storage.engine == "storwize" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.name }}]
+volume_driver = cinder.volume.drivers.storwize_svc.StorwizeSVCDriver
+volume_backend_name={{ type.name }}
+san_ip={{ controller.storage.host }}
+san_ssh_port={{ controller.storage.port }}
+san_login={{ controller.storage.user }}
+san_password={{ controller.storage.password }}
+
+storwize_svc_volpool_name={{ type.pool }}
+#storwize_svc_connection_protocol=iSCSI
+storwize_svc_connection_protocol={{ controller.storage.connection }}
+#storwize_svc_iscsi_chap_enabled=true
+storwize_svc_multihost_enabled={{ controller.storage.multihost }}
+storwize_svc_multipath_enabled={{ controller.storage.multipath }}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if controller.storage.engine == "hitachi_vsp" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.name }}]
+volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver
+volume_backend_name={{ type.name }}
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+hitachi_serial_number=86644
+#hitachi_serial_number=355316
+
+# Name of an array unit (string value)
+#hitachi_unit_name=<None>
+#hitachi_unit_name=fiat
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+hitachi_pool_id=4
+
+# Thin pool ID of storage system (integer value)
+hitachi_thin_pool_id=14
+
+# Range of logical device of storage system (string value)
+#hitachi_ldev_range=<None>
+#hitachi_ldev_range=00:05:00-00:05:FF
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+hitachi_default_copy_method=THIN
+
+# Copy speed of storage system (integer value)
+#hitachi_copy_speed=3
+
+# Interval to check copy (integer value)
+#hitachi_copy_check_interval=3
+
+# Interval to check copy asynchronously (integer value)
+#hitachi_async_copy_check_interval=10
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+hitachi_target_ports=CL1-E,CL2-E,CL3-B,CL4-D
+
+# Range of group number (string value)
+#hitachi_group_range=
+#hitachi_group_range=
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+hitachi_group_request=True
+#hitachi_group_request=false
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_fc
+#
+# Request for FC Zone creating HostGroup (boolean value)
+hitachi_zoning_request=true
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_horcm
+#
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+hitachi_horcm_numbers=0,1
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+#hitachi_horcm_user=openstack
+hitachi_horcm_user=root
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+#hitachi_horcm_password=avg2014
+hitachi_horcm_password=X3tT35va
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+hitachi_horcm_add_conf=true
+
+#hitachi multipath advice
+use_multipath_for_image_xfer=false
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if controller.storage.engine == "ceph" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.name }}]
+
+volume_driver = cinder.volume.drivers.rbd.RBDDriver
+#
+# Options defined in cinder.volume.drivers.rbd
+#
+# The RADOS pool where rbd volumes are stored (string value)
+#rbd_pool=volumes
+rbd_pool={{ type.pool }}
+
+# The RADOS client name for accessing rbd volumes - only set
+# when using cephx authentication (string value)
+#rbd_user=cinder
+rbd_user={{ controller.storage.user }}
+
+# Path to the ceph configuration file (string value)
+#rbd_ceph_conf=
+rbd_ceph_conf=/etc/ceph/ceph.conf
+
+# Flatten volumes created from snapshots to remove dependency
+# from volume to snapshot (boolean value)
+#rbd_flatten_volume_from_snapshot=false
+
+# The libvirt uuid of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=da74ccb7-aa59-1721-a172-0006b1aa4e3e
+rbd_secret_uuid={{ controller.storage.secret_uuid }}
+
+# Directory where temporary image files are stored when the
+# volume driver does not write them directly to the volume.
+# (string value)
+#volume_tmp_dir=<None>
+
+# Maximum number of nested volume clones that are taken before
+# a flatten occurs. Set to 0 to disable cloning. (integer
+# value)
+#rbd_max_clone_depth=5
+
+# Volumes will be chunked into objects of this size (in
+# megabytes). (integer value)
+#rbd_store_chunk_size=4
+
+# Timeout value (in seconds) used when connecting to ceph
+# cluster. If value < 0, no timeout is set and default
+# librados value is used. (integer value)
+#rados_connect_timeout=-1
+
+{%- endfor %}
+
+{%- endif %}
+
+
+{%- if controller.storage.engine == "hp3par" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.name }}]
+
+hp3par_api_url={{ controller.storage.url }}
+
+# 3PAR Super user username
+hp3par_username={{ controller.storage.user }}
+
+# 3PAR Super user password
+hp3par_password={{ controller.storage.password }}
+
+# 3PAR CPG to use for volume creation
+hp3par_cpg={{ controller.storage.cpg }}
+
+# IP address of SAN controller for SSH access to the array
+san_ip={{ controller.storage.host }}
+
+# Username for SAN controller for SSH access to the array
+san_login={{ controller.storage.login }}
+
+# Password for SAN controller for SSH access to the array
+san_password={{ controller.storage.password }}
+
+# FIBRE CHANNEL(uncomment the next line to enable the FC driver)
+volume_driver=cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver
+
+# iSCSI (uncomment the next line to enable the iSCSI driver and
+# hp3par_iscsi_ips or iscsi_ip_address)
+#volume_driver=cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver
+
+# iSCSI multiple port configuration
+# hp3par_iscsi_ips=10.10.220.253:3261,10.10.222.234
+#hp3par_iscsi_ips=10.10.103.151
+
+# Still available for single port iSCSI configuration
+#iscsi_ip_address=10.10.103.151
+
+## OPTIONAL SETTINGS
+# Enable HTTP debugging to 3PAR
+# hp3par_debug=True
+hp3par_debug={{ controller.storage.debug }}
+
+# Enable CHAP authentication for iSCSI connections.
+hp3par_iscsi_chap_enabled=false
+
+# The CPG to use for Snapshots for volumes. If empty hp3par_cpg will be used.
+hp3par_snap_cpg={{ controller.storage.snapcpg }}
+
+# Time in hours to retain a snapshot. You can't delete it before this expires.
+hp3par_snapshot_retention=2
+
+# Time in hours when a snapshot expires and is deleted. This must be larger than retention.
+hp3par_snapshot_expiration=4
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if controller.storage.engine == "fujitsu" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.name }}]
+volume_backend_name={{ type.name }}
+volume_driver=cinder.volume.drivers.fujitsu.fujitsu_eternus_dx_fc.FJDXFCDriver
+cinder_eternus_config_file=/etc/cinder/cinder_fujitsu_eternus_dx_{{ type.name }}.xml
+
+{%- endfor %}
+
+{%- endif %}
\ No newline at end of file
diff --git a/cinder/files/juno/cinder.conf.volume.Debian b/cinder/files/juno/cinder.conf.volume.Debian
new file mode 100644
index 0000000..73bce22
--- /dev/null
+++ b/cinder/files/juno/cinder.conf.volume.Debian
@@ -0,0 +1,410 @@
+{%- from "cinder/map.jinja" import volume with context %}
+
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+#volume_group = cinder
+
+verbose = True
+
+auth_strategy = keystone
+
+state_path = /var/lib/cinder
+lock_path = /var/lock/cinder
+
+use_syslog=False
+
+glance_num_retries=0
+debug=False
+
+#glance_api_ssl_compression=False
+#glance_api_insecure=False
+
+osapi_volume_listen={{ volume.osapi.host }}
+
+glance_host={{ volume.glance.host }}
+glance_port={{ volume.glance.port }}
+glance_api_version=2
+
+volume_backend_name=DEFAULT
+default_volume_type={% for type in volume.get('types', []) %}{% if loop.first %}{{ type.name }}{% endif %}{% endfor %}
+
+#RPC response timeout recommended by Hitachi
+rpc_response_timeout=3600
+
+#Rabbit
+kombu_reconnect_delay=5.0
+rpc_backend=cinder.openstack.common.rpc.impl_kombu
+control_exchange=cinder
+rabbit_host = {{ volume.message_queue.host }}
+rabbit_port = {{ volume.message_queue.port }}
+rabbit_userid = {{ volume.message_queue.user }}
+rabbit_password = {{ volume.message_queue.password }}
+rabbit_virtual_host = {{ volume.message_queue.virtual_host }}
+rabbit_ha_queues={{ volume.message_queue.ha_queues }}
+
+
+volume_clear={{ volume.wipe_method }}
+
+{%- if volume.notification %}
+control_exchange = cinder
+
+{%- if volume.notification.driver is defined %}
+notification_driver = {{ volume.notification.driver }}
+{%- else %}
+notification_driver = messagingv2
+{%- endif %}
+
+{%- if volume.notification.topics is defined %}
+notification_topics = {{ volume.notification.topics }}
+{%- endif %}
+
+{%- endif %}
+
+volume_name_template = volume-%s
+
+#volume_group = vg_cinder_volume
+
+volumes_dir = /var/lib/cinder/volumes
+log_dir=/var/log/cinder
+
+# Use syslog for logging. (boolean value)
+#use_syslog=false
+
+use_syslog=false
+verbose=True
+lock_path=/var/lock/cinder
+
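+# One backend per configured type; Open vStorage vpools are appended when that engine is selected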
+enabled_backends={% for type in volume.get('types', []) %}{{ type.name }}{% if not loop.last %},{% endif %}{% endfor %}{% if volume.storage.engine == "openvstorage" %}{% for vpool in pillar.openvstorage.server.get('vpools', []) %}{{ vpool }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-cinder
+admin_password={{ volume.identity.password }}
+auth_port={{ volume.identity.port }}
+auth_host={{ volume.identity.host }}
+admin_tenant_name={{ volume.identity.tenant }}
+auth_protocol=http
+admin_user=cinder
+signing_dirname=/tmp/keystone-signing-cinder
+auth_uri=http://{{ volume.identity.host }}:{{ volume.identity.port }}
+
+[database]
+idle_timeout=3600
+max_pool_size=30
+max_retries=-1
+max_overflow=40
+connection = {{ volume.database.engine }}://{{ volume.database.user }}:{{ volume.database.password }}@{{ volume.database.host }}/{{ volume.database.name }}
+
+{%- if volume.storage.engine == "storwize" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.name }}]
+volume_driver = cinder.volume.drivers.storwize_svc.StorwizeSVCDriver
+volume_backend_name={{ type.name }}
+san_ip={{ volume.storage.host }}
+san_ssh_port={{ volume.storage.port }}
+san_login={{ volume.storage.user }}
+san_password={{ volume.storage.password }}
+
+storwize_svc_volpool_name={{ type.pool }}
+#storwize_svc_connection_protocol=iSCSI
+storwize_svc_connection_protocol={{ volume.storage.connection }}
+#storwize_svc_iscsi_chap_enabled=true
+storwize_svc_multihost_enabled={{ volume.storage.multihost }}
+storwize_svc_multipath_enabled={{ volume.storage.multipath }}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == "hitachi_vsp" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.name }}]
+
+{%- if volume.storage.version == "1.0" %}
+
+volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver
+volume_backend_name={{ type.name }}
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+hitachi_serial_number={{ volume.storage.storage_id }}
+#hitachi_serial_number=355316
+
+# Name of an array unit (string value)
+#hitachi_unit_name=<None>
+#hitachi_unit_name=fiat
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+hitachi_pool_id={{ volume.storage.pool_id }}
+
+# Thin pool ID of storage system (integer value)
+hitachi_thin_pool_id={{ volume.storage.thin_pool_id }}
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+hitachi_default_copy_method=THIN
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+hitachi_target_ports={{ volume.storage.compute_target_ports }}
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+hitachi_group_request=True
+#hitachi_group_request=false
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_fc
+#
+# Request for FC Zone creating HostGroup (boolean value)
+hitachi_zoning_request=true
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_horcm
+#
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+hitachi_horcm_numbers=0,1
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+#hitachi_horcm_user=openstack
+hitachi_horcm_user={{ volume.storage.user }}
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+#hitachi_horcm_password=avg2014
+hitachi_horcm_password={{ volume.storage.password }}
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+hitachi_horcm_add_conf=true
+
+#hitachi multipath advice
+use_multipath_for_image_xfer=false
+
+{%- endif %}
+
+{%- if volume.storage.version == "1.3" %}
+
+volume_driver = cinder.volume.drivers.hitachi.hbsd.hbsd_fc.HBSDFCDriver
+volume_backend_name=hitachi_vsp
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+#hitachi_serial_number=86644
+hitachi_storage_id={{ volume.storage.storage_id }}
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+hitachi_pool={{ volume.storage.pool_id }}
+
+# Thin pool ID of storage system (integer value)
+hitachi_thin_pool={{ volume.storage.thin_pool_id }}
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+hitachi_default_copy_method=THIN
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+hitachi_target_ports={{ volume.storage.target_ports }}
+
+hitachi_compute_target_ports={{ volume.storage.compute_target_ports }}
+
+# Range of group number (string value)
+#hitachi_group_range=
+#hitachi_group_range=
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+#JPA
+hitachi_group_request=True
+#hitachi_group_request=false
+
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+hitachi_horcm_numbers=0,1
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+#hitachi_horcm_user=openstack
+hitachi_horcm_user={{ volume.storage.user }}
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+#hitachi_horcm_password=avg2014
+hitachi_horcm_password={{ volume.storage.password }}
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+hitachi_horcm_add_conf=true
+
+#hitachi multipath advice
+use_multipath_for_image_xfer=false
+
+hitachi_storage_cli=HORCM
+
+
+{%- endif %}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == "ceph" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.name }}]
+
+volume_driver = cinder.volume.drivers.rbd.RBDDriver
+#
+# Options defined in cinder.volume.drivers.rbd
+#
+# The RADOS pool where rbd volumes are stored (string value)
+#rbd_pool=volumes
+rbd_pool={{ type.pool }}
+
+# The RADOS client name for accessing rbd volumes - only set
+# when using cephx authentication (string value)
+#rbd_user=cinder
+rbd_user={{ volume.storage.user }}
+
+# Path to the ceph configuration file (string value)
+#rbd_ceph_conf=
+rbd_ceph_conf=/etc/ceph/ceph.conf
+
+# Flatten volumes created from snapshots to remove dependency
+# from volume to snapshot (boolean value)
+#rbd_flatten_volume_from_snapshot=false
+
+# The libvirt uuid of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=da74ccb7-aa59-1721-a172-0006b1aa4e3e
+rbd_secret_uuid={{ volume.storage.secret_uuid }}
+
+# Directory where temporary image files are stored when the
+# volume driver does not write them directly to the volume.
+# (string value)
+#volume_tmp_dir=<None>
+
+# Maximum number of nested volume clones that are taken before
+# a flatten occurs. Set to 0 to disable cloning. (integer
+# value)
+#rbd_max_clone_depth=5
+
+# Volumes will be chunked into objects of this size (in
+# megabytes). (integer value)
+#rbd_store_chunk_size=4
+
+# Timeout value (in seconds) used when connecting to ceph
+# cluster. If value < 0, no timeout is set and default
+# librados value is used. (integer value)
+#rados_connect_timeout=-1
+
+{%- endfor %}
+
+{%- endif %}
+
+
+{%- if volume.storage.engine == "hp3par" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.name }}]
+
+hp3par_api_url={{ volume.storage.url }}
+
+# 3PAR Super user username
+hp3par_username={{ volume.storage.user }}
+
+# 3PAR Super user password
+hp3par_password={{ volume.storage.password }}
+
+# 3PAR CPG to use for volume creation
+hp3par_cpg={{ volume.storage.cpg }}
+
+# IP address of SAN volume for SSH access to the array
+san_ip={{ volume.storage.host }}
+
+# Username for SAN volume for SSH access to the array
+san_login={{ volume.storage.login }}
+
+# Password for SAN volume for SSH access to the array
+san_password={{ volume.storage.password }}
+
+# FIBRE CHANNEL(uncomment the next line to enable the FC driver)
+volume_driver=cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver
+
+# iSCSI (uncomment the next line to enable the iSCSI driver and
+# hp3par_iscsi_ips or iscsi_ip_address)
+#volume_driver=cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver
+
+# iSCSI multiple port configuration
+# hp3par_iscsi_ips=10.10.220.253:3261,10.10.222.234
+#hp3par_iscsi_ips=10.10.103.151
+
+# Still available for single port iSCSI configuration
+#iscsi_ip_address=10.10.103.151
+
+## OPTIONAL SETTINGS
+# Enable HTTP debugging to 3PAR
+# hp3par_debug=True
+hp3par_debug={{ volume.storage.debug }}
+
+# Enable CHAP authentication for iSCSI connections.
+hp3par_iscsi_chap_enabled=false
+
+# The CPG to use for Snapshots for volumes. If empty hp3par_cpg will be used.
+hp3par_snap_cpg={{ volume.storage.snapcpg }}
+
+# Time in hours to retain a snapshot. You can't delete it before this expires.
+hp3par_snapshot_retention=2
+
+# Time in hours when a snapshot expires and is deleted. This must be larger than retention.
+hp3par_snapshot_expiration=4
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == "openvstorage" %}
+
+{%- for vpool in pillar.openvstorage.server.get('vpools', []) %}
+
+[{{ vpool }}]
+volume_driver = cinder.volume.drivers.openvstorage.OVSVolumeDriver
+volume_backend_name={{ vpool }}
+vpool_name={{ vpool }}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == "fujitsu" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.name }}]
+volume_backend_name={{ type.name }}
+volume_driver=cinder.volume.drivers.fujitsu.fujitsu_eternus_dx_fc.FJDXFCDriver
+cinder_eternus_config_file=/etc/cinder/cinder_fujitsu_eternus_dx_{{ type.name }}.xml
+
+{%- endfor %}
+
+{%- endif %}
\ No newline at end of file
diff --git a/cinder/files/juno/cinder_fujitsu_eternus_dx.xml b/cinder/files/juno/cinder_fujitsu_eternus_dx.xml
new file mode 100644
index 0000000..2f21171
--- /dev/null
+++ b/cinder/files/juno/cinder_fujitsu_eternus_dx.xml
@@ -0,0 +1,10 @@
+{%- from "cinder/map.jinja" import volume with context -%}
+<?xml version='1.0' encoding='UTF-8'?>
+<FUJITSU>
+<EternusIP>{{ volume.storage.host }}</EternusIP>
+<EternusPort>{{ volume.storage.port }}</EternusPort>
+<EternusUser>{{ volume.storage.user }}</EternusUser>
+<EternusPassword>{{ volume.storage.password }}</EternusPassword>
+<EternusISCSIIP></EternusISCSIIP>
+<EternusPool>{{ volume_type_name }}</EternusPool>
+</FUJITSU>
diff --git a/cinder/files/kilo/api-paste.ini.controller.Debian b/cinder/files/kilo/api-paste.ini.controller.Debian
new file mode 100644
index 0000000..e5b5444
--- /dev/null
+++ b/cinder/files/kilo/api-paste.ini.controller.Debian
@@ -0,0 +1,70 @@
+#############
+# OpenStack #
+#############
+{%- from "cinder/map.jinja" import controller with context %}
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware.request_id:RequestId.factory
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+service_port=5000
+service_host={{ pillar.cinder.controller.identity.host }}
+service_protocol=http
+auth_host = {{ pillar.cinder.controller.identity.host }}
+auth_port = {{ pillar.cinder.controller.identity.port }}
+auth_protocol = http
+admin_tenant_name = {{ pillar.cinder.controller.identity.tenant }}
+admin_user = {{ pillar.cinder.controller.identity.user }}
+admin_password = {{ pillar.cinder.controller.identity.password }}
+auth_uri=http://{{ pillar.cinder.controller.identity.host }}:5000/
\ No newline at end of file
diff --git a/cinder/files/kilo/api-paste.ini.volume.Debian b/cinder/files/kilo/api-paste.ini.volume.Debian
new file mode 100644
index 0000000..a280eb9
--- /dev/null
+++ b/cinder/files/kilo/api-paste.ini.volume.Debian
@@ -0,0 +1,70 @@
+#############
+# OpenStack #
+#############
+{%- from "cinder/map.jinja" import volume with context %}
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware.request_id:RequestId.factory
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+service_port=5000
+service_host={{ pillar.cinder.volume.identity.host }}
+service_protocol=http
+auth_host = {{ pillar.cinder.volume.identity.host }}
+auth_port = {{ pillar.cinder.volume.identity.port }}
+auth_protocol = http
+admin_tenant_name = {{ pillar.cinder.volume.identity.tenant }}
+admin_user = {{ pillar.cinder.volume.identity.user }}
+admin_password = {{ pillar.cinder.volume.identity.password }}
+auth_uri=http://{{ pillar.cinder.volume.identity.host }}:5000/
\ No newline at end of file
diff --git a/cinder/files/kilo/cinder.conf.controller.Debian b/cinder/files/kilo/cinder.conf.controller.Debian
new file mode 100644
index 0000000..c8741ca
--- /dev/null
+++ b/cinder/files/kilo/cinder.conf.controller.Debian
@@ -0,0 +1,339 @@
+{%- from "cinder/map.jinja" import controller with context %}
+
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+#volume_group = cinder
+
+verbose = True
+
+auth_strategy = keystone
+
+state_path = /var/lib/cinder
+lock_path = /var/lock/cinder
+
+use_syslog=False
+
+glance_num_retries=0
+debug=False
+
+#glance_api_ssl_compression=False
+#glance_api_insecure=False
+
+osapi_volume_listen={{ controller.osapi.host }}
+
+glance_host={{ controller.glance.host }}
+glance_port={{ controller.glance.port }}
+glance_api_version=2
+
+volume_backend_name=DEFAULT
+default_volume_type={% for type in controller.get('types', []) %}{% if loop.first %}{{ type.name }}{% endif %}{% endfor %}
+
+#RPC response timeout recommended by Hitachi
+rpc_response_timeout=3600
+
+#Rabbit
+rpc_backend=cinder.openstack.common.rpc.impl_kombu
+control_exchange=cinder
+
+
+volume_clear={{ controller.wipe_method }}
+
+{%- if controller.notification %}
+control_exchange = cinder
+notification_driver = messagingv2
+{%- endif %}
+
+
+volume_name_template = volume-%s
+
+#volume_group = vg_cinder_volume
+
+volumes_dir = /var/lib/cinder/volumes
+log_dir=/var/log/cinder
+
+# Use syslog for logging. (boolean value)
+#use_syslog=false
+
+use_syslog=false
+verbose=True
+lock_path=/var/lock/cinder
+
+enabled_backends={% for type in controller.get('types', []) %}{{ type.name }}{% if not loop.last %},{% endif %}{% endfor %}
+
+[oslo_concurrency]
+
+lock_path=/var/lock/cinder
+
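+# From Kilo on, oslo.messaging reads RabbitMQ options from this section instead of [DEFAULT]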
+[oslo_messaging_rabbit]
+rabbit_host = {{ controller.message_queue.host }}
+rabbit_port = {{ controller.message_queue.port }}
+rabbit_userid = {{ controller.message_queue.user }}
+rabbit_password = {{ controller.message_queue.password }}
+rabbit_virtual_host = {{ controller.message_queue.virtual_host }}
+rabbit_ha_queues={{ controller.message_queue.ha_queues }}
+kombu_reconnect_delay=5.0
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-cinder
+admin_password={{ controller.identity.password }}
+auth_port={{ controller.identity.port }}
+auth_host={{ controller.identity.host }}
+admin_tenant_name={{ controller.identity.tenant }}
+auth_protocol=http
+admin_user=cinder
+signing_dirname=/tmp/keystone-signing-cinder
+auth_uri=http://{{ controller.identity.host }}:{{ controller.identity.port }}
+
+[database]
+idle_timeout=3600
+max_pool_size=30
+max_retries=-1
+max_overflow=40
+connection = {{ controller.database.engine }}://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}
+
+{%- if controller.storage.engine == "storwize" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.name }}]
+volume_driver = cinder.volume.drivers.storwize_svc.StorwizeSVCDriver
+volume_backend_name={{ type.name }}
+san_ip={{ controller.storage.host }}
+san_ssh_port={{ controller.storage.port }}
+san_login={{ controller.storage.user }}
+san_password={{ controller.storage.password }}
+
+storwize_svc_volpool_name={{ type.pool }}
+#storwize_svc_connection_protocol=iSCSI
+storwize_svc_connection_protocol={{ controller.storage.connection }}
+#storwize_svc_iscsi_chap_enabled=true
+storwize_svc_multihost_enabled={{ controller.storage.multihost }}
+storwize_svc_multipath_enabled={{ controller.storage.multipath }}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if controller.storage.engine == "hitachi_vsp" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.name }}]
+volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver
+volume_backend_name={{ type.name }}
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+hitachi_serial_number=86644
+#hitachi_serial_number=355316
+
+# Name of an array unit (string value)
+#hitachi_unit_name=<None>
+#hitachi_unit_name=fiat
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+hitachi_pool_id=4
+
+# Thin pool ID of storage system (integer value)
+hitachi_thin_pool_id=14
+
+# Range of logical device of storage system (string value)
+#hitachi_ldev_range=<None>
+#hitachi_ldev_range=00:05:00-00:05:FF
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+hitachi_default_copy_method=THIN
+
+# Copy speed of storage system (integer value)
+#hitachi_copy_speed=3
+
+# Interval to check copy (integer value)
+#hitachi_copy_check_interval=3
+
+# Interval to check copy asynchronously (integer value)
+#hitachi_async_copy_check_interval=10
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+hitachi_target_ports=CL1-E,CL2-E,CL3-B,CL4-D
+
+# Range of group number (string value)
+#hitachi_group_range=
+#hitachi_group_range=
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+hitachi_group_request=True
+#hitachi_group_request=false
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_fc
+#
+# Request for FC Zone creating HostGroup (boolean value)
+hitachi_zoning_request=true
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_horcm
+#
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+hitachi_horcm_numbers=0,1
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+#hitachi_horcm_user=openstack
+hitachi_horcm_user=root
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+#hitachi_horcm_password=avg2014
+hitachi_horcm_password=X3tT35va
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+hitachi_horcm_add_conf=true
+
+#hitachi multipath advice
+use_multipath_for_image_xfer=false
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if controller.storage.engine == "ceph" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.name }}]
+
+volume_driver = cinder.volume.drivers.rbd.RBDDriver
+#
+# Options defined in cinder.volume.drivers.rbd
+#
+# The RADOS pool where rbd volumes are stored (string value)
+#rbd_pool=volumes
+rbd_pool={{ type.pool }}
+
+# The RADOS client name for accessing rbd volumes - only set
+# when using cephx authentication (string value)
+#rbd_user=cinder
+rbd_user={{ controller.storage.user }}
+
+# Path to the ceph configuration file (string value)
+#rbd_ceph_conf=
+rbd_ceph_conf=/etc/ceph/ceph.conf
+
+# Flatten volumes created from snapshots to remove dependency
+# from volume to snapshot (boolean value)
+#rbd_flatten_volume_from_snapshot=false
+
+# The libvirt uuid of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=da74ccb7-aa59-1721-a172-0006b1aa4e3e
+rbd_secret_uuid={{ controller.storage.secret_uuid }}
+
+# Directory where temporary image files are stored when the
+# volume driver does not write them directly to the volume.
+# (string value)
+#volume_tmp_dir=<None>
+
+# Maximum number of nested volume clones that are taken before
+# a flatten occurs. Set to 0 to disable cloning. (integer
+# value)
+#rbd_max_clone_depth=5
+
+# Volumes will be chunked into objects of this size (in
+# megabytes). (integer value)
+#rbd_store_chunk_size=4
+
+# Timeout value (in seconds) used when connecting to ceph
+# cluster. If value < 0, no timeout is set and default
+# librados value is used. (integer value)
+#rados_connect_timeout=-1
+
+{%- endfor %}
+
+{%- endif %}
+
+
+{%- if controller.storage.engine == "hp3par" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.name }}]
+
+hp3par_api_url={{ controller.storage.url }}
+
+# 3PAR Super user username
+hp3par_username={{ controller.storage.user }}
+
+# 3PAR Super user password
+hp3par_password={{ controller.storage.password }}
+
+# 3PAR CPG to use for volume creation
+hp3par_cpg={{ controller.storage.cpg }}
+
+# IP address of SAN controller for SSH access to the array
+san_ip={{ controller.storage.host }}
+
+# Username for SAN controller for SSH access to the array
+san_login={{ controller.storage.login }}
+
+# Password for SAN controller for SSH access to the array
+san_password={{ controller.storage.password }}
+
+# FIBRE CHANNEL(uncomment the next line to enable the FC driver)
+volume_driver=cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver
+
+# iSCSI (uncomment the next line to enable the iSCSI driver and
+# hp3par_iscsi_ips or iscsi_ip_address)
+#volume_driver=cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver
+
+# iSCSI multiple port configuration
+# hp3par_iscsi_ips=10.10.220.253:3261,10.10.222.234
+#hp3par_iscsi_ips=10.10.103.151
+
+# Still available for single port iSCSI configuration
+#iscsi_ip_address=10.10.103.151
+
+## OPTIONAL SETTINGS
+# Enable HTTP debugging to 3PAR
+# hp3par_debug=True
+hp3par_debug={{ controller.storage.debug }}
+
+# Enable CHAP authentication for iSCSI connections.
+hp3par_iscsi_chap_enabled=false
+
+# The CPG to use for Snapshots for volumes. If empty hp3par_cpg will be used.
+hp3par_snap_cpg={{ controller.storage.snapcpg }}
+
+# Time in hours to retain a snapshot. You can't delete it before this expires.
+hp3par_snapshot_retention=2
+
+# Time in hours when a snapshot expires and is deleted. This must be larger than retention.
+hp3par_snapshot_expiration=4
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if controller.storage.engine == "fujitsu" %}
+
+{%- for type in controller.get('types', []) %}
+
+[{{ type.name }}]
+volume_backend_name={{ type.name }}
+volume_driver=cinder.volume.drivers.fujitsu.fujitsu_eternus_dx_fc.FJDXFCDriver
+cinder_eternus_config_file=/etc/cinder/cinder_fujitsu_eternus_dx_{{ type.name }}.xml
+
+{%- endfor %}
+
+{%- endif %}
\ No newline at end of file
diff --git a/cinder/files/kilo/cinder.conf.volume.Debian b/cinder/files/kilo/cinder.conf.volume.Debian
new file mode 100644
index 0000000..d851c02
--- /dev/null
+++ b/cinder/files/kilo/cinder.conf.volume.Debian
@@ -0,0 +1,416 @@
+{%- from "cinder/map.jinja" import volume with context %}
+
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+#volume_group = cinder
+
+verbose = True
+
+auth_strategy = keystone
+
+state_path = /var/lib/cinder
+lock_path = /var/lock/cinder
+
+use_syslog=False
+
+glance_num_retries=0
+debug=False
+
+#glance_api_ssl_compression=False
+#glance_api_insecure=False
+
+osapi_volume_listen={{ volume.osapi.host }}
+
+glance_host={{ volume.glance.host }}
+glance_port={{ volume.glance.port }}
+glance_api_version=2
+
+volume_backend_name=DEFAULT
+default_volume_type={% for type in volume.get('types', []) %}{% if loop.first %}{{ type.name }}{% endif %}{% endfor %}
+
+#RPC response timeout recommended by Hitachi
+rpc_response_timeout=3600
+
+#Rabbit
+rpc_backend=cinder.openstack.common.rpc.impl_kombu
+control_exchange=cinder
+
+
+volume_clear={{ volume.wipe_method }}
+
+{%- if volume.notification %}
+control_exchange = cinder
+
+{%- if volume.notification.driver is defined %}
+notification_driver = {{ volume.notification.driver }}
+{%- else %}
+notification_driver = messagingv2
+{%- endif %}
+
+{%- if volume.notification.topics is defined %}
+notification_topics = {{ volume.notification.topics }}
+{%- endif %}
+
+{%- endif %}
+
+volume_name_template = volume-%s
+
+#volume_group = vg_cinder_volume
+
+volumes_dir = /var/lib/cinder/volumes
+log_dir=/var/log/cinder
+
+# Use syslog for logging. (boolean value)
+#use_syslog=false
+
+use_syslog=false
+verbose=True
+
+enabled_backends={% for type in volume.get('types', []) %}{{ type.name }}{% if not loop.last %},{% endif %}{% endfor %}{% if volume.storage.engine == "openvstorage" %}{% for vpool in pillar.openvstorage.server.get('vpools', []) %}{{ vpool }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
+
+[oslo_concurrency]
+
+lock_path=/var/lock/cinder
+
+[oslo_messaging_rabbit]
+
+rabbit_host = {{ volume.message_queue.host }}
+rabbit_port = {{ volume.message_queue.port }}
+rabbit_userid = {{ volume.message_queue.user }}
+rabbit_password = {{ volume.message_queue.password }}
+rabbit_virtual_host = {{ volume.message_queue.virtual_host }}
+rabbit_ha_queues={{ volume.message_queue.ha_queues }}
+kombu_reconnect_delay=5.0
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-cinder
+admin_password={{ volume.identity.password }}
+auth_port={{ volume.identity.port }}
+auth_host={{ volume.identity.host }}
+admin_tenant_name={{ volume.identity.tenant }}
+auth_protocol=http
+admin_user=cinder
+signing_dirname=/tmp/keystone-signing-cinder
+auth_uri=http://{{ volume.identity.host }}:{{ volume.identity.port }}
+
+[database]
+idle_timeout=3600
+max_pool_size=30
+max_retries=-1
+max_overflow=40
+connection = {{ volume.database.engine }}://{{ volume.database.user }}:{{ volume.database.password }}@{{ volume.database.host }}/{{ volume.database.name }}
+
+{%- if volume.storage.engine == "storwize" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.name }}]
+volume_driver = cinder.volume.drivers.storwize_svc.StorwizeSVCDriver
+volume_backend_name={{ type.name }}
+san_ip={{ volume.storage.host }}
+san_ssh_port={{ volume.storage.port }}
+san_login={{ volume.storage.user }}
+san_password={{ volume.storage.password }}
+
+storwize_svc_volpool_name={{ type.pool }}
+#storwize_svc_connection_protocol=iSCSI
+storwize_svc_connection_protocol={{ volume.storage.connection }}
+#storwize_svc_iscsi_chap_enabled=true
+storwize_svc_multihost_enabled={{ volume.storage.multihost }}
+storwize_svc_multipath_enabled={{ volume.storage.multipath }}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == "hitachi_vsp" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.name }}]
+
+{%- if volume.storage.version == "1.0" %}
+
+volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver
+volume_backend_name={{ type.name }}
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+hitachi_serial_number=86644
+#hitachi_serial_number=355316
+
+# Name of an array unit (string value)
+#hitachi_unit_name=<None>
+#hitachi_unit_name=fiat
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+hitachi_pool_id=4
+
+# Thin pool ID of storage system (integer value)
+hitachi_thin_pool_id=13
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+hitachi_default_copy_method=THIN
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+hitachi_target_ports=CL1-E,CL2-E,CL3-B,CL4-D
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+hitachi_group_request=True
+#hitachi_group_request=false
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_fc
+#
+# Request for FC Zone creating HostGroup (boolean value)
+hitachi_zoning_request=true
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_horcm
+#
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+hitachi_horcm_numbers=0,1
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+#hitachi_horcm_user=openstack
+hitachi_horcm_user=root
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+#hitachi_horcm_password=avg2014
+hitachi_horcm_password=X3tT35va
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+hitachi_horcm_add_conf=true
+
+#hitachi multipath advice
+use_multipath_for_image_xfer=false
+
+{%- endif %}
+
+{%- if volume.storage.version == "1.3" %}
+
+volume_driver = cinder.volume.drivers.hitachi.hbsd.hbsd_fc.HBSDFCDriver
+volume_backend_name=hitachi_vsp
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+#hitachi_serial_number=86644
+hitachi_storage_id=86644
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+hitachi_pool=4
+
+# Thin pool ID of storage system (integer value)
+hitachi_thin_pool=13
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+hitachi_default_copy_method=THIN
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+hitachi_target_ports=CL3-B
+
+hitachi_compute_target_ports=CL1-E,CL2-E,CL3-B,CL4-D
+
+# Range of group number (string value)
+#hitachi_group_range=
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+hitachi_group_request=True
+#hitachi_group_request=false
+
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+hitachi_horcm_numbers=0,1
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+hitachi_horcm_user={{ volume.storage.user }}
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+hitachi_horcm_password={{ volume.storage.password }}
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+hitachi_horcm_add_conf=true
+
+#hitachi multipath advice
+use_multipath_for_image_xfer=false
+
+hitachi_storage_cli=HORCM
+
+
+{%- endif %}
+
+{%- endfor %}
+
+{%- endif %}
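+{#- The Hitachi VSP sections above embed site-specific values (serial numbers,
+    pool IDs, target ports, HORCM instance numbers) that should be adapted per
+    deployment; from pillar the template itself only reads storage.version
+    ("1.0" or "1.3"), storage.user, storage.password and each type's name. -#}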
+
+{%- if volume.storage.engine == "ceph" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.name }}]
+
+volume_driver = cinder.volume.drivers.rbd.RBDDriver
+#
+# Options defined in cinder.volume.drivers.rbd
+#
+# The RADOS pool where rbd volumes are stored (string value)
+#rbd_pool=volumes
+rbd_pool={{ type.pool }}
+
+# The RADOS client name for accessing rbd volumes - only set
+# when using cephx authentication (string value)
+#rbd_user=cinder
+rbd_user={{ volume.storage.user }}
+
+# Path to the ceph configuration file (string value)
+#rbd_ceph_conf=
+rbd_ceph_conf=/etc/ceph/ceph.conf
+
+# Flatten volumes created from snapshots to remove dependency
+# from volume to snapshot (boolean value)
+#rbd_flatten_volume_from_snapshot=false
+
+# The libvirt uuid of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=da74ccb7-aa59-1721-a172-0006b1aa4e3e
+rbd_secret_uuid={{ volume.storage.secret_uuid }}
+
+# Directory where temporary image files are stored when the
+# volume driver does not write them directly to the volume.
+# (string value)
+#volume_tmp_dir=<None>
+
+# Maximum number of nested volume clones that are taken before
+# a flatten occurs. Set to 0 to disable cloning. (integer
+# value)
+#rbd_max_clone_depth=5
+
+# Volumes will be chunked into objects of this size (in
+# megabytes). (integer value)
+#rbd_store_chunk_size=4
+
+# Timeout value (in seconds) used when connecting to ceph
+# cluster. If value < 0, no timeout is set and default
+# librados value is used. (integer value)
+#rados_connect_timeout=-1
+
+{%- endfor %}
+
+{%- endif %}
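+{#- Illustrative pillar for the Ceph RBD section above (placeholder values):
+    cinder:
+      volume:
+        storage:
+          engine: ceph
+          user: cinder
+          secret_uuid: 00000000-0000-0000-0000-000000000000
+        types:
+        - name: standard
+          pool: volumes
+    secret_uuid must match the libvirt secret that holds the cephx key for
+    rbd_user (e.g. one generated with uuidgen and loaded via virsh
+    secret-define). -#}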
+
+
+{%- if volume.storage.engine == "hp3par" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.name }}]
+
+hp3par_api_url={{ volume.storage.url }}
+
+# 3PAR Super user username
+hp3par_username={{ volume.storage.user }}
+
+# 3PAR Super user password
+hp3par_password={{ volume.storage.password }}
+
+# 3PAR CPG to use for volume creation
+hp3par_cpg={{ volume.storage.cpg }}
+
+# IP address of the SAN controller for SSH access to the array
+san_ip={{ volume.storage.host }}
+
+# Username for the SAN controller for SSH access to the array
+san_login={{ volume.storage.login }}
+
+# Password for the SAN controller for SSH access to the array
+san_password={{ volume.storage.password }}
+
+# FIBRE CHANNEL driver (enabled here; comment out to use the iSCSI driver below)
+volume_driver=cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver
+
+# iSCSI (uncomment the next line to enable the iSCSI driver and
+# hp3par_iscsi_ips or iscsi_ip_address)
+#volume_driver=cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver
+
+# iSCSI multiple port configuration
+# hp3par_iscsi_ips=10.10.220.253:3261,10.10.222.234
+#hp3par_iscsi_ips=10.10.103.151
+
+# Still available for single port iSCSI configuration
+#iscsi_ip_address=10.10.103.151
+
+## OPTIONAL SETTINGS
+# Enable HTTP debugging to 3PAR
+# hp3par_debug=True
+hp3par_debug={{ volume.storage.debug }}
+
+# Enable CHAP authentication for iSCSI connections.
+hp3par_iscsi_chap_enabled=false
+
+# The CPG to use for snapshots of volumes. If empty, hp3par_cpg will be used.
+hp3par_snap_cpg={{ volume.storage.snapcpg }}
+
+# Time in hours to retain a snapshot. You can't delete it before this expires.
+hp3par_snapshot_retention=2
+
+# Time in hours when a snapshot expires and is deleted. This must be larger than retention.
+hp3par_snapshot_expiration=4
+
+{%- endfor %}
+
+{%- endif %}
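+{#- Pillar keys the HP 3PAR section above reads (values are deployment
+    specific): cinder:volume:storage with engine: hp3par, url (the WSAPI
+    endpoint, e.g. https://3par.example.com:8080/api/v1), user/password
+    (WSAPI account), cpg, snapcpg, host and login (SSH access), and debug. -#}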
+
+{%- if volume.storage.engine == "openvstorage" %}
+
+{%- for vpool in pillar.openvstorage.server.get('vpools', []) %}
+
+[{{ vpool }}]
+volume_driver = cinder.volume.drivers.openvstorage.OVSVolumeDriver
+volume_backend_name={{ vpool }}
+vpool_name={{ vpool }}
+
+{%- endfor %}
+
+{%- endif %}
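+{#- Note: the Open vStorage loop above reads vpool names from the
+    openvstorage server pillar (pillar.openvstorage.server.vpools), not from
+    the cinder pillar, so that pillar must be present on the volume node. -#}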
+
+{%- if volume.storage.engine == "fujitsu" %}
+
+{%- for type in volume.get('types', []) %}
+
+[{{ type.name }}]
+volume_backend_name={{ type.name }}
+volume_driver=cinder.volume.drivers.fujitsu.fujitsu_eternus_dx_fc.FJDXFCDriver
+cinder_eternus_config_file=/etc/cinder/cinder_fujitsu_eternus_dx_{{ type.name }}.xml
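+{#- The per-type XML file referenced above is rendered by cinder.volume from
+    cinder/files/<version>/cinder_fujitsu_eternus_dx.xml, with the type's
+    pool name passed in as volume_type_name. -#}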
+
+{%- endfor %}
+
+{%- endif %}
\ No newline at end of file
diff --git a/cinder/files/kilo/cinder_fujitsu_eternus_dx.xml b/cinder/files/kilo/cinder_fujitsu_eternus_dx.xml
new file mode 100644
index 0000000..2f21171
--- /dev/null
+++ b/cinder/files/kilo/cinder_fujitsu_eternus_dx.xml
@@ -0,0 +1,10 @@
+{%- from "cinder/map.jinja" import volume with context -%}
+<?xml version='1.0' encoding='UTF-8'?>
+<FUJITSU>
+<EternusIP>{{ volume.storage.host }}</EternusIP>
+<EternusPort>{{ volume.storage.port }}</EternusPort>
+<EternusUser>{{ volume.storage.user }}</EternusUser>
+<EternusPassword>{{ volume.storage.password }}</EternusPassword>
+<EternusISCSIIP></EternusISCSIIP>
+<EternusPool>{{ volume_type_name }}</EternusPool>
+</FUJITSU>
diff --git a/cinder/files/sensu.conf b/cinder/files/sensu.conf
new file mode 100644
index 0000000..8035dd9
--- /dev/null
+++ b/cinder/files/sensu.conf
@@ -0,0 +1,30 @@
+local_cinder_api_proc:
+ command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C cinder-api -u cinder -c 1:30"
+ interval: 60
+ occurrences: 1
+ subscribers:
+ - local-cinder-controller
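+# NB: check_procs -C matches the process "comm" name, which the kernel
+# truncates to 15 characters; "cinder-schedule" below is therefore the
+# correct (truncated) match for the cinder-scheduler daemon.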
+local_cinder_scheduler_proc:
+ command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C cinder-schedule -u cinder -c 1:5"
+ interval: 60
+ occurrences: 1
+ subscribers:
+ - local-cinder-controller
+local_cinder_volume_proc:
+ command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C cinder-volume -u cinder -c 1:5"
+ interval: 60
+ occurrences: 1
+ subscribers:
+ - local-cinder-volume
+remote_openstack_cinder_api:
+  command: 'PATH=$PATH:/usr/local/bin oschecks-check_cinder_api --os_auth_url="http://:::openstack.host::::5000/v2.0" --os_username=:::openstack.user::: --os_password=":::openstack.password:::" --os_tenant_name=:::openstack.tenant:::'
+ interval: 60
+ occurrences: 1
+ subscribers:
+ - remote-network
+remote_openstack_cinder_volume:
+ command: 'PATH=$PATH:/usr/local/bin oschecks-check_cinder_volume --auth_url="http://:::openstack.host::::5000/v2.0" --username :::openstack.user::: --password :::openstack.password::: --tenant :::openstack.tenant:::'
+ interval: 300
+ occurrences: 1
+ subscribers:
+ - remote-network
\ No newline at end of file
diff --git a/cinder/init.sls b/cinder/init.sls
new file mode 100644
index 0000000..967b134
--- /dev/null
+++ b/cinder/init.sls
@@ -0,0 +1,8 @@
+
+include:
+{% if pillar.cinder.controller is defined %}
+- cinder.controller
+{% endif %}
+{% if pillar.cinder.volume is defined %}
+- cinder.volume
+{% endif %}
\ No newline at end of file
diff --git a/cinder/map.jinja b/cinder/map.jinja
new file mode 100644
index 0000000..d2282cb
--- /dev/null
+++ b/cinder/map.jinja
@@ -0,0 +1,30 @@
+
+{% set controller = salt['grains.filter_by']({
+ 'Debian': {
+ 'pkgs': ['cinder-api', 'cinder-scheduler', 'lvm2', 'python-cinder'],
+ 'services': ['cinder-api', 'cinder-scheduler'],
+ 'wipe_method': 'none',
+ 'notification': False,
+ },
+ 'RedHat': {
+ 'pkgs': ['openstack-cinder', 'python-cinder', 'lvm2'],
+ 'services': ['openstack-cinder-api', 'openstack-cinder-scheduler'],
+ 'wipe_method': 'none',
+ 'notification': False,
+ },
+}, merge=salt['pillar.get']('cinder:controller')) %}
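+{#- grains.filter_by selects the per-OS defaults above and deep-merges the
+    corresponding pillar over them: e.g. a pillar of
+    cinder:controller:wipe_method: shred overrides wipe_method while the
+    default pkgs and services lists are kept. The same applies to the
+    volume mapping below. -#}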
+
+{% set volume = salt['grains.filter_by']({
+ 'Debian': {
+    'pkgs': ['cinder-volume', 'lvm2', 'sysfsutils', 'sg3-utils', 'python-cinder', 'python-mysqldb', 'p7zip'],
+ 'services': ['cinder-volume'],
+ 'wipe_method': 'none',
+ 'notification': False,
+ },
+ 'RedHat': {
+ 'pkgs': ['openstack-cinder', 'python-cinder', 'lvm2', 'sysfsutils', 'sg3_utils', 'device-mapper-multipath', 'device-mapper-multipath-libs'],
+ 'services': ['openstack-cinder-volume'],
+ 'wipe_method': 'none',
+ 'notification': False,
+ },
+}, merge=salt['pillar.get']('cinder:volume')) %}
\ No newline at end of file
diff --git a/cinder/user.sls b/cinder/user.sls
new file mode 100644
index 0000000..aabf39d
--- /dev/null
+++ b/cinder/user.sls
@@ -0,0 +1,25 @@
+{%- if not salt['user.info']('cinder') %}
+cinder_user:
+ user.present:
+ - name: cinder
+ - home: /var/lib/cinder
+ - uid: 304
+ - gid: 304
+ - shell: /bin/false
+ - system: True
+ - require_in:
+ {%- if pillar.cinder.controller is defined and pillar.cinder.controller.enabled %}
+ - pkg: cinder_controller_packages
+ {%- endif %}
+ {%- if pillar.cinder.volume is defined and pillar.cinder.volume.enabled %}
+ - pkg: cinder_volume_packages
+ {%- endif %}
+
+cinder_group:
+ group.present:
+ - name: cinder
+ - gid: 304
+ - system: True
+ - require_in:
+ - user: cinder_user
+{%- endif %}
diff --git a/cinder/volume.sls b/cinder/volume.sls
new file mode 100644
index 0000000..163eecb
--- /dev/null
+++ b/cinder/volume.sls
@@ -0,0 +1,125 @@
+{%- from "cinder/map.jinja" import volume with context %}
+{%- if volume.enabled %}
+
+include:
+- cinder.user
+
+cinder_volume_packages:
+ pkg.installed:
+ - names: {{ volume.pkgs }}
+
+/var/lock/cinder:
+ file.directory:
+ - mode: 755
+ - user: cinder
+ - group: cinder
+ - require:
+ - pkg: cinder_volume_packages
+ - require_in:
+ - service: cinder_volume_services
+
+{%- if pillar.cinder.controller is not defined or not pillar.cinder.controller.enabled %}
+
+/etc/cinder/cinder.conf:
+ file.managed:
+ - source: salt://cinder/files/{{ volume.version }}/cinder.conf.volume.{{ grains.os_family }}
+ - template: jinja
+ - require:
+ - pkg: cinder_volume_packages
+
+/etc/cinder/api-paste.ini:
+ file.managed:
+ - source: salt://cinder/files/{{ volume.version }}/api-paste.ini.volume.{{ grains.os_family }}
+ - template: jinja
+ - require:
+ - pkg: cinder_volume_packages
+
+{%- endif %}
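+# When the controller role is enabled on the same node, /etc/cinder/cinder.conf
+# and api-paste.ini are left to cinder.controller, which is expected to manage
+# the same file states that the service watch below references.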
+
+cinder_volume_services:
+ service.running:
+ - names: {{ volume.services }}
+ - enable: true
+ - watch:
+ - file: /etc/cinder/cinder.conf
+ - file: /etc/cinder/api-paste.ini
+
+{%- if volume.storage.engine == 'iscsi' %}
+
+cinder_iscsi_packages:
+ pkg.installed:
+ - names:
+ - iscsitarget
+ - open-iscsi
+ - iscsitarget-dkms
+ - require:
+ - pkg: cinder_volume_packages
+
+/etc/default/iscsitarget:
+ file.managed:
+ - source: salt://cinder/files/iscsitarget
+ - template: jinja
+ - require:
+ - pkg: cinder_iscsi_packages
+
+cinder_scsi_service:
+ service.running:
+ - names:
+ - iscsitarget
+ - open-iscsi
+ - enable: true
+ - watch:
+ - file: /etc/default/iscsitarget
+
+{%- endif %}
+
+{%- if volume.storage.engine == 'hitachi_vsp' %}
+
+{%- if grains.os_family == 'Debian' and volume.version == 'juno' %}
+
+hitachi_pkgs:
+ pkg.latest:
+ - names:
+ - horcm
+ - hbsd
+
+cinder_hitachi_vsp_dir:
+ file.directory:
+ - name: /var/lock/hbsd
+ - user: cinder
+ - group: cinder
+
+{%- endif %}
+
+{%- endif %}
+
+{%- if volume.storage.engine == 'hp3par' %}
+
+hp3parclient:
+ pkg.latest:
+ - name: python-hp3parclient
+
+{%- endif %}
+
+{%- if volume.storage.engine == 'fujitsu' %}
+
+cinder_driver_fujitsu:
+ pkg.latest:
+ - name: cinder-driver-fujitsu
+
+{%- for type in volume.get('types', []) %}
+
+/etc/cinder/cinder_fujitsu_eternus_dx_{{ type.name }}.xml:
+ file.managed:
+ - source: salt://cinder/files/{{ volume.version }}/cinder_fujitsu_eternus_dx.xml
+ - template: jinja
+ - defaults:
+ volume_type_name: "{{ type.pool }}"
+ - require:
+    - pkg: cinder_driver_fujitsu
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- endif %}
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000..632f5c8
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,11 @@
+salt-formula-cinder (0.2) trusty; urgency=medium
+
+ * First public release
+
+ -- Filip Pytloun <filip.pytloun@tcpcloud.eu> Tue, 06 Oct 2015 16:38:38 +0200
+
+salt-formula-cinder (0.1) trusty; urgency=medium
+
+ * Initial release
+
+ -- Jakub Pavlik <jakub.pavlik@tcpcloud.eu> Thu, 13 Aug 2015 23:23:41 +0200
diff --git a/debian/compat b/debian/compat
new file mode 100644
index 0000000..ec63514
--- /dev/null
+++ b/debian/compat
@@ -0,0 +1 @@
+9
diff --git a/debian/control b/debian/control
new file mode 100644
index 0000000..18456d1
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,15 @@
+Source: salt-formula-cinder
+Maintainer: Jakub Pavlik <jakub.pavlik@tcpcloud.eu>
+Section: admin
+Priority: optional
+Build-Depends: debhelper (>= 9)
+Standards-Version: 3.9.6
+Homepage: http://www.tcpcloud.eu
+Vcs-Browser: https://github.com/tcpcloud/salt-formula-cinder
+Vcs-Git: https://github.com/tcpcloud/salt-formula-cinder.git
+
+Package: salt-formula-cinder
+Architecture: all
+Depends: ${misc:Depends}, salt-master, reclass
+Description: Cinder Salt formula
+ Install and configure Cinder server and client.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 0000000..91a44f1
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,15 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: salt-formula-cinder
+Upstream-Contact: Jakub Pavlik <jakub.pavlik@tcpcloud.eu>
+Source: https://github.com/tcpcloud/salt-formula-cinder
+
+Files: *
+Copyright: 2014-2015 tcp cloud
+License: Apache-2.0
+ Copyright (C) 2014-2015 tcp cloud
+ .
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ .
+ On a Debian system you can find a copy of this license in
+ /usr/share/common-licenses/Apache-2.0.
diff --git a/debian/docs b/debian/docs
new file mode 100644
index 0000000..d585829
--- /dev/null
+++ b/debian/docs
@@ -0,0 +1,3 @@
+README.rst
+CHANGELOG.rst
+VERSION
diff --git a/debian/install b/debian/install
new file mode 100644
index 0000000..fa996e5
--- /dev/null
+++ b/debian/install
@@ -0,0 +1,2 @@
+cinder/* /usr/share/salt-formulas/env/cinder/
+metadata/service/* /usr/share/salt-formulas/reclass/service/cinder/
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000..abde6ef
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,5 @@
+#!/usr/bin/make -f
+
+%:
+ dh $@
+
diff --git a/debian/source/format b/debian/source/format
new file mode 100644
index 0000000..89ae9db
--- /dev/null
+++ b/debian/source/format
@@ -0,0 +1 @@
+3.0 (native)
diff --git a/metadata/service/control/cluster.yml b/metadata/service/control/cluster.yml
new file mode 100644
index 0000000..b274228
--- /dev/null
+++ b/metadata/service/control/cluster.yml
@@ -0,0 +1,38 @@
+applications:
+- cinder
+parameters:
+ cinder:
+ controller:
+ enabled: true
+ version: ${_param:cinder_version}
+ osapi:
+ host: ${_param:cluster_local_address}
+ database:
+ engine: mysql
+ host: ${_param:cluster_vip_address}
+ port: 3306
+ name: cinder
+ user: cinder
+ password: ${_param:mysql_cinder_password}
+ identity:
+ engine: keystone
+ host: ${_param:cluster_vip_address}
+ port: 35357
+ tenant: service
+ user: cinder
+ password: ${_param:keystone_cinder_password}
+ glance:
+ host: ${_param:cluster_vip_address}
+ port: 9292
+ message_queue:
+ engine: rabbitmq
+ host: ${_param:cluster_vip_address}
+ port: 5672
+ user: openstack
+ password: ${_param:rabbitmq_openstack_password}
+ virtual_host: '/openstack'
+ ha_queues: true
+ storage:
+ engine: iscsi
+ host: localhost
+ types: []
\ No newline at end of file
diff --git a/metadata/service/control/cluster_control.yml b/metadata/service/control/cluster_control.yml
new file mode 100644
index 0000000..b274228
--- /dev/null
+++ b/metadata/service/control/cluster_control.yml
@@ -0,0 +1,38 @@
+applications:
+- cinder
+parameters:
+ cinder:
+ controller:
+ enabled: true
+ version: ${_param:cinder_version}
+ osapi:
+ host: ${_param:cluster_local_address}
+ database:
+ engine: mysql
+ host: ${_param:cluster_vip_address}
+ port: 3306
+ name: cinder
+ user: cinder
+ password: ${_param:mysql_cinder_password}
+ identity:
+ engine: keystone
+ host: ${_param:cluster_vip_address}
+ port: 35357
+ tenant: service
+ user: cinder
+ password: ${_param:keystone_cinder_password}
+ glance:
+ host: ${_param:cluster_vip_address}
+ port: 9292
+ message_queue:
+ engine: rabbitmq
+ host: ${_param:cluster_vip_address}
+ port: 5672
+ user: openstack
+ password: ${_param:rabbitmq_openstack_password}
+ virtual_host: '/openstack'
+ ha_queues: true
+ storage:
+ engine: iscsi
+ host: localhost
+ types: []
\ No newline at end of file
diff --git a/metadata/service/control/single.yml b/metadata/service/control/single.yml
new file mode 100644
index 0000000..68dc240
--- /dev/null
+++ b/metadata/service/control/single.yml
@@ -0,0 +1,38 @@
+applications:
+- cinder
+parameters:
+ cinder:
+ controller:
+ enabled: true
+ version: ${_param:cinder_version}
+ osapi:
+ host: ${linux:network:host:local:address}
+ database:
+ engine: mysql
+ host: localhost
+ port: 3306
+ name: cinder
+ user: cinder
+ password: ${_secret:mysql_cinder_password}
+ identity:
+ engine: keystone
+ host: ${linux:network:host:local:address}
+ port: 35357
+ tenant: service
+ user: cinder
+ password: ${_secret:keystone_cinder_password}
+ glance:
+ host: ${linux:network:host:local:address}
+ port: 9292
+ message_queue:
+ engine: rabbitmq
+ host: ${linux:network:host:local:address}
+ port: 5672
+ user: openstack
+ password: ${_secret:rabbitmq_openstack_password}
+ virtual_host: '/openstack'
+ ha_queues: false
+ storage:
+ engine: iscsi
+ host: localhost
+ types: []
diff --git a/metadata/service/volume/single.yml b/metadata/service/volume/single.yml
new file mode 100644
index 0000000..1676dcb
--- /dev/null
+++ b/metadata/service/volume/single.yml
@@ -0,0 +1,38 @@
+applications:
+- cinder
+parameters:
+ cinder:
+ volume:
+ enabled: true
+ version: ${_param:cinder_version}
+ osapi:
+ host: ${_param:cluster_local_address}
+ database:
+ engine: mysql
+ host: ${_param:cluster_vip_address}
+ port: 3306
+ name: cinder
+ user: cinder
+ password: ${_param:mysql_cinder_password}
+ identity:
+ engine: keystone
+ host: ${_param:cluster_vip_address}
+ port: 35357
+ tenant: service
+ user: cinder
+ password: ${_param:keystone_cinder_password}
+ glance:
+ host: ${_param:cluster_vip_address}
+ port: 9292
+ message_queue:
+ engine: rabbitmq
+ host: ${_param:cluster_vip_address}
+ port: 5672
+ user: openstack
+ password: ${_param:rabbitmq_openstack_password}
+ virtual_host: '/openstack'
+ ha_queues: true
+ storage:
+ engine: iscsi
+ host: localhost
+ types: []
\ No newline at end of file