Merge "Enable logging.conf & fluentd for cinder"
diff --git a/.kitchen.travis.yml b/.kitchen.travis.yml
deleted file mode 100644
index b54d661..0000000
--- a/.kitchen.travis.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-suites:
-
- - name: <%= ENV['SUITE'] %>
- provisioner:
- pillars-from-files:
- cinder.sls: tests/pillar/<%= ENV['SUITE'] %>.sls
diff --git a/.travis.yml b/.travis.yml
index 62f155b..4f20546 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -17,35 +17,36 @@
- bundle install
env:
- - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=ceph_single
- - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=ceph_single
- - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=control_cluster
- - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=control_cluster
- - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=control_single
- - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=control_single
- - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=gpfs_single
- - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=gpfs_single
- - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=hp3par_single
- - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=hp3par_single
- - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=lefthand_single
- - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=lefthand_single
- - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=solidfire_single
- - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=solidfire_single
- - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=storwize_single
- - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=storwize_single
- - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=volume_single
- - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=volume_single
- - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=vsp_single
- - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=vsp_single
- - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=volume_single_barbican
- - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=volume_single_barbican
+ - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=ceph-single
+ - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=ceph-single
+ - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=control-cluster
+ - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=control-cluster
+ - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=control-single
+ - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=control-single
+ - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=gpfs-single
+ - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=gpfs-single
+ - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=hp3par-single
+ - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=hp3par-single
+ - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=lefthand-single
+ - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=lefthand-single
+ - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=solidfire-single
+ - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=solidfire-single
+ - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=storwize-single
+ - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=storwize-single
+ - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=volume-single
+ - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=volume-single
+ - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=vsp-single
+ - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=vsp-single
+ - PLATFORM=trevorj/salty-whales:trusty OS_VERSION=mitaka SUITE=volume-single-barbican
+ - PLATFORM=trevorj/salty-whales:xenial OS_VERSION=ocata SUITE=volume-single-barbican
before_script:
- set -o pipefail
- make test | tail
script:
- - KITCHEN_LOCAL_YAML=.kitchen.travis.yml bundle exec kitchen test -t tests/integration
+ - test ! -e .kitchen.yml || bundle exec kitchen converge ${SUITE} || true
+ - test ! -e .kitchen.yml || bundle exec kitchen verify ${SUITE} -t tests/integration
notifications:
webhooks:
diff --git a/cinder/files/pike b/cinder/files/pike
deleted file mode 120000
index d5e8ce2..0000000
--- a/cinder/files/pike
+++ /dev/null
@@ -1 +0,0 @@
-ocata
\ No newline at end of file
diff --git a/cinder/files/pike/api-paste.ini.controller.Debian b/cinder/files/pike/api-paste.ini.controller.Debian
new file mode 100644
index 0000000..aada960
--- /dev/null
+++ b/cinder/files/pike/api-paste.ini.controller.Debian
@@ -0,0 +1,83 @@
+#############
+# OpenStack #
+#############
+{%- from "cinder/map.jinja" import controller with context %}
+
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+/v3: openstack_volume_api_v3
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext {% if controller.audit.enabled %}audit {% endif %}apiv1
+keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext {% if controller.audit.enabled %}audit {% endif %}apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext {% if controller.audit.enabled %}audit {% endif %}apiv2
+keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext {% if controller.audit.enabled %}audit {% endif %}apiv2
+
+[composite:openstack_volume_api_v3]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv3
+keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3
+keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware.request_id:RequestId.factory
+
+[filter:http_proxy_to_wsgi]
+paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
+
+[filter:cors]
+paste.filter_factory = oslo_middleware.cors:filter_factory
+oslo_config_project = cinder
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = oslo_middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[app:apiv3]
+paste.app_factory = cinder.api.v3.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = cors http_proxy_to_wsgi faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+
+{%- if controller.audit.enabled %}
+[filter:audit]
+paste.filter_factory = {{ controller.get("audit", {}).get("filter_factory", "keystonemiddleware.audit:filter_factory") }}
+audit_map_file = {{ controller.get("audit", {}).get("map_file", "/etc/pycadf/cinder_api_audit_map.conf") }}
+{%- endif %}
+
diff --git a/cinder/files/pike/api-paste.ini.controller.RedHat b/cinder/files/pike/api-paste.ini.controller.RedHat
new file mode 120000
index 0000000..341baca
--- /dev/null
+++ b/cinder/files/pike/api-paste.ini.controller.RedHat
@@ -0,0 +1 @@
+api-paste.ini.controller.Debian
\ No newline at end of file
diff --git a/cinder/files/pike/api-paste.ini.volume.Debian b/cinder/files/pike/api-paste.ini.volume.Debian
new file mode 100644
index 0000000..e4944ec
--- /dev/null
+++ b/cinder/files/pike/api-paste.ini.volume.Debian
@@ -0,0 +1,82 @@
+#############
+# OpenStack #
+#############
+{%- from "cinder/map.jinja" import volume with context %}
+
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+/v3: openstack_volume_api_v3
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext {% if volume.audit.enabled %}audit {% endif %}apiv1
+keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext {% if volume.audit.enabled %}audit {% endif %}apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+
+[composite:openstack_volume_api_v3]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv3
+keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3
+keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware.request_id:RequestId.factory
+
+[filter:http_proxy_to_wsgi]
+paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
+
+[filter:cors]
+paste.filter_factory = oslo_middleware.cors:filter_factory
+oslo_config_project = cinder
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = oslo_middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[app:apiv3]
+paste.app_factory = cinder.api.v3.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = cors http_proxy_to_wsgi faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+
+{%- if volume.audit.enabled %}
+[filter:audit]
+paste.filter_factory = {{ volume.get("audit", {}).get("filter_factory", "keystonemiddleware.audit:filter_factory") }}
+audit_map_file = {{ volume.get("audit", {}).get("map_file", "/etc/pycadf/cinder_api_audit_map.conf") }}
+{%- endif %}
diff --git a/cinder/files/pike/api-paste.ini.volume.RedHat b/cinder/files/pike/api-paste.ini.volume.RedHat
new file mode 120000
index 0000000..c5204ec
--- /dev/null
+++ b/cinder/files/pike/api-paste.ini.volume.RedHat
@@ -0,0 +1 @@
+api-paste.ini.volume.Debian
\ No newline at end of file
diff --git a/cinder/files/pike/cinder-wsgi.conf b/cinder/files/pike/cinder-wsgi.conf
new file mode 100644
index 0000000..b228a06
--- /dev/null
+++ b/cinder/files/pike/cinder-wsgi.conf
@@ -0,0 +1,26 @@
+{%- from "cinder/map.jinja" import controller with context %}
+Listen {{ controller.osapi.host }}:8776
+
+<VirtualHost {{ controller.osapi.host }}:8776>
+ WSGIDaemonProcess cinder-wsgi processes=5 threads=1 user=cinder group=cinder display-name=%{GROUP}
+ WSGIProcessGroup cinder-wsgi
+ WSGIScriptAlias / /usr/bin/cinder-wsgi
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+
+ ErrorLog /var/log/apache2/cinder_error.log
+ CustomLog /var/log/apache2/cinder.log "%v:%p %h %l %u %t \"%r\" %>s %D %O \"%{Referer}i\" \"%{User-Agent}i\""
+
+ <Directory /usr/bin>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
+</VirtualHost>
diff --git a/cinder/files/pike/cinder.conf.controller.Debian b/cinder/files/pike/cinder.conf.controller.Debian
new file mode 100644
index 0000000..8bc04ef
--- /dev/null
+++ b/cinder/files/pike/cinder.conf.controller.Debian
@@ -0,0 +1,264 @@
+{%- from "cinder/map.jinja" import controller with context %}
+
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+#volume_group = cinder
+
+verbose = True
+
+osapi_volume_workers = {{ controller.get('volume_workers', '4') }}
+
+auth_strategy = keystone
+
+state_path = /var/lib/cinder
+
+use_syslog=False
+
+glance_num_retries=0
+debug=False
+
+os_region_name={{ controller.identity.region }}
+allow_availability_zone_fallback = {{ controller.get('availability_zone_fallback', True) }}
+
+#glance_api_ssl_compression=False
+#glance_api_insecure=False
+
+osapi_volume_listen={{ controller.osapi.host }}
+
+glance_api_servers = {{ controller.glance.get('protocol','http') }}://{{ controller.glance.host }}:{{ controller.glance.port }}
+{%- if controller.glance.get('protocol','http') == 'https' %}
+glance_ca_certificates_file = {{ controller.glance.get('cacert_file', controller.cacert_file) }}
+{%- endif %}
+
+
+glance_host={{ controller.glance.host }}
+glance_port={{ controller.glance.port }}
+glance_api_version=2
+
+enable_v3_api = True
+
+os_privileged_user_name={{ controller.identity.user }}
+os_privileged_user_password={{ controller.identity.password }}
+os_privileged_user_tenant={{ controller.identity.tenant }}
+os_privileged_user_auth_url={{ controller.identity.get('protocol','http') }}://{{ controller.identity.host }}:5000/v3/
+
+volume_backend_name=DEFAULT
+
+{%- if controller.backend is defined %}
+
+default_volume_type={{ controller.default_volume_type }}
+
+enabled_backends={% for backend_name, backend in controller.get('backend', {}).iteritems() %}{{ backend_name }}{% if not loop.last %},{% endif %}{% endfor %}
+
+{%- endif %}
+
+{%- if controller.storage_availability_zone is defined %}
+storage_availability_zone={{controller.storage_availability_zone}}
+{%- endif %}
+
+{%- if controller.default_availability_zone is defined %}
+default_availability_zone={{controller.default_availability_zone}}
+{%- endif %}
+
+# Enables the Force option on upload_to_image. This enables running
+# upload_volume on in-use volumes for backends that support it. (boolean value)
+#enable_force_upload = false
+enable_force_upload = {{ controller.get('enable_force_upload', False)|lower }}
+
+#RPC response timeout recommended by Hitachi
+rpc_response_timeout=3600
+
+#Rabbit
+control_exchange=cinder
+
+
+volume_clear={{ controller.wipe_method }}
+
+
+
+volume_name_template = volume-%s
+
+#volume_group = vg_cinder_volume
+
+volumes_dir = /var/lib/cinder/volumes
+log_dir=/var/log/cinder
+
+# Use syslog for logging. (boolean value)
+#use_syslog=false
+
+use_syslog=false
+verbose=True
+lock_path=/var/lock/cinder
+
+{%- if controller.query_volume_filters is defined %}
+query_volume_filters = {{ controller.query_volume_filters|join(",") }}
+{%- endif %}
+
+nova_catalog_admin_info = compute:nova:adminURL
+nova_catalog_info = compute:nova:{{ controller.identity.get('endpoint_type', 'publicURL') }}
+
+osapi_volume_extension = cinder.api.contrib.standard_extensions
+
+{%- set rabbit_port = controller.message_queue.get('port', 5671 if controller.message_queue.get('ssl',{}).get('enabled', False) else 5672) %}
+{%- if controller.message_queue.members is defined %}
+transport_url = rabbit://{% for member in controller.message_queue.members -%}
+ {{ controller.message_queue.user }}:{{ controller.message_queue.password }}@{{ member.host }}:{{ member.get('port',rabbit_port) }}
+ {%- if not loop.last -%},{%- endif -%}
+ {%- endfor -%}
+ /{{ controller.message_queue.virtual_host }}
+{%- else %}
+transport_url = rabbit://{{ controller.message_queue.user }}:{{ controller.message_queue.password }}@{{ controller.message_queue.host }}:{{ rabbit_port }}/{{ controller.message_queue.virtual_host }}
+{%- endif %}
+
+{%- if controller.backup.engine != None %}
+{%- set backup_backend_fragment = "cinder/files/backup_backend/_" + controller.backup.engine + ".conf" %}
+{%- include backup_backend_fragment %}
+{%- endif %}
+{%- if controller.nas_secure_file_permissions is defined %}
+nas_secure_file_permissions={{ controller.nas_secure_file_permissions }}
+{%- endif %}
+{%- if controller.nas_secure_file_operations is defined %}
+nas_secure_file_operations={{ controller.nas_secure_file_operations }}
+{%- endif %}
+{%- if controller.cinder_internal_tenant_user_id is defined %}
+cinder_internal_tenant_user_id={{ controller.cinder_internal_tenant_user_id }}
+{%- endif %}
+{%- if controller.cinder_internal_tenant_project_id is defined %}
+cinder_internal_tenant_project_id={{ controller.cinder_internal_tenant_project_id }}
+{%- endif %}
+
+[oslo_messaging_notifications]
+{%- if controller.notification is mapping %}
+driver = {{ controller.notification.get('driver', 'messagingv2') }}
+{%- if controller.notification.topics is defined %}
+topics = {{ controller.notification.topics }}
+{%- endif %}
+{%- elif controller.notification %}
+driver = messagingv2
+{%- endif %}
+
+[oslo_concurrency]
+
+lock_path=/var/lock/cinder
+
+[oslo_middleware]
+
+enable_proxy_headers_parsing = True
+
+{%- if controller.message_queue.get('ssl',{}).get('enabled', False) %}
+[oslo_messaging_rabbit]
+rabbit_use_ssl=true
+
+{%- if controller.message_queue.ssl.version is defined %}
+kombu_ssl_version = {{ controller.message_queue.ssl.version }}
+{%- elif salt['grains.get']('pythonversion') > [2,7,8] %}
+kombu_ssl_version = TLSv1_2
+{%- endif %}
+
+kombu_ssl_ca_certs = {{ controller.message_queue.ssl.get('cacert_file', controller.cacert_file) }}
+{%- endif %}
+
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-cinder
+revocation_cache_time = 10
+auth_type = password
+user_domain_name = {{ controller.identity.get('domain', 'Default') }}
+project_domain_name = {{ controller.identity.get('domain', 'Default') }}
+project_name = {{ controller.identity.tenant }}
+username = {{ controller.identity.user }}
+password = {{ controller.identity.password }}
+
+auth_uri={{ controller.identity.get('protocol', 'http') }}://{{ controller.identity.host }}:5000
+auth_url={{ controller.identity.get('protocol', 'http') }}://{{ controller.identity.host }}:35357
+{%- if controller.identity.get('protocol', 'http') == 'https' %}
+cafile={{ controller.identity.get('cacert_file', controller.cacert_file) }}
+{%- endif %}
+
+# Temporarily disabled for backward compatibility
+#auth_uri=http://{{ controller.identity.host }}/identity
+#auth_url=http://{{ controller.identity.host }}/identity_v2_admin
+{%- if controller.cache is defined %}
+memcached_servers={%- for member in controller.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+auth_version = v3
+
+{%- if controller.get('barbican', {}).get('enabled', False) %}
+[key_manager]
+api_class = castellan.key_manager.barbican_key_manager.BarbicanKeyManager
+
+[barbican]
+auth_endpoint = {{ controller.identity.get('protocol', 'http') }}://{{ controller.identity.get('host', 'localhost') }}:{{ controller.identity.get('port', '5000') }}/v3
+{%- if controller.barbican.get('protocol', 'http') == 'https' %}
+cafile={{ controller.barbican.get('cacert_file', controller.cacert_file) }}
+{%- endif %}
+{%- endif %}
+
+[database]
+idle_timeout=3600
+max_pool_size=30
+max_retries=-1
+max_overflow=40
+connection = {{ controller.database.engine }}+pymysql://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}?charset=utf8{%- if controller.database.get('ssl',{}).get('enabled',False) %}&ssl_ca={{ controller.database.ssl.get('cacert_file', controller.cacert_file) }}{% endif %}
+
+{%- if controller.backend is defined %}
+
+{%- for backend_name, backend in controller.get('backend', {}).iteritems() %}
+
+{%- set backend_fragment = "cinder/files/backend/_" + backend.engine + ".conf" %}
+{%- include backend_fragment %}
+
+{%- endfor %}
+
+{%- endif %}
+
+[cors]
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain
+# received in the requests "origin" header. (list value)
+#allowed_origin = <None>
+{%- if controller.cors.allowed_origin is defined %}
+allowed_origin = {{ controller.cors.allowed_origin }}
+{%- endif %}
+
+# Indicate that the actual request can include user credentials
+# (boolean value)
+#allow_credentials = true
+{%- if controller.cors.allow_credentials is defined %}
+allow_credentials = {{ controller.cors.allow_credentials }}
+{%- endif %}
+
+# Indicate which headers are safe to expose to the API. Defaults to
+# HTTP Simple Headers. (list value)
+#expose_headers = X-Image-Meta-Checksum,X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID
+{%- if controller.cors.expose_headers is defined %}
+expose_headers = {{ controller.cors.expose_headers }}
+{%- endif %}
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+{%- if controller.cors.max_age is defined %}
+max_age = {{ controller.cors.max_age }}
+{%- endif %}
+
+# Indicate which methods can be used during the actual request. (list
+# value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+{%- if controller.cors.allow_methods is defined %}
+allow_methods = {{ controller.cors.allow_methods }}
+{%- endif %}
+
+# Indicate which header field names may be used during the actual
+# request. (list value)
+#allow_headers = Content-MD5,X-Image-Meta-Checksum,X-Storage-Token,Accept-Encoding,X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID
+{%- if controller.cors.allow_headers is defined %}
+allow_headers = {{ controller.cors.allow_headers }}
+{%- endif %}
diff --git a/cinder/files/pike/cinder.conf.controller.RedHat b/cinder/files/pike/cinder.conf.controller.RedHat
new file mode 120000
index 0000000..563de88
--- /dev/null
+++ b/cinder/files/pike/cinder.conf.controller.RedHat
@@ -0,0 +1 @@
+cinder.conf.controller.Debian
\ No newline at end of file
diff --git a/cinder/files/pike/cinder.conf.volume.Debian b/cinder/files/pike/cinder.conf.volume.Debian
new file mode 100644
index 0000000..f999860
--- /dev/null
+++ b/cinder/files/pike/cinder.conf.volume.Debian
@@ -0,0 +1,203 @@
+{%- from "cinder/map.jinja" import volume with context %}
+
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+#volume_group = cinder
+
+verbose = True
+
+osapi_volume_workers = {{ volume.get('volume_workers', '4') }}
+
+auth_strategy = keystone
+
+state_path = /var/lib/cinder
+
+use_syslog=False
+
+glance_num_retries=0
+debug=False
+
+os_region_name={{ volume.identity.region }}
+
+#glance_api_ssl_compression=False
+#glance_api_insecure=False
+
+osapi_volume_listen={{ volume.osapi.host }}
+
+glance_api_servers = {{ volume.glance.get('protocol','http') }}://{{ volume.glance.host }}:{{ volume.glance.port }}
+{%- if volume.glance.get('protocol','http') == 'https' %}
+glance_ca_certificates_file = {{ volume.glance.get('cacert_file', volume.cacert_file) }}
+{%- endif %}
+
+
+glance_host={{ volume.glance.host }}
+glance_port={{ volume.glance.port }}
+glance_api_version=2
+
+os_privileged_user_name={{ volume.identity.user }}
+os_privileged_user_password={{ volume.identity.password }}
+os_privileged_user_tenant={{ volume.identity.tenant }}
+os_privileged_user_auth_url={{ volume.identity.get('protocol','http') }}://{{ volume.identity.host }}:5000/v3/
+
+volume_backend_name=DEFAULT
+
+{%- if volume.backend is defined %}
+
+default_volume_type={{ volume.default_volume_type }}
+
+enabled_backends={% for backend_name, backend in volume.get('backend', {}).iteritems() %}{{ backend_name }}{% if not loop.last %},{% endif %}{% endfor %}
+
+{%- endif %}
+
+{%- if volume.storage_availability_zone is defined %}
+storage_availability_zone={{volume.storage_availability_zone}}
+{%- endif %}
+
+{%- if volume.default_availability_zone is defined %}
+default_availability_zone={{volume.default_availability_zone}}
+{%- endif %}
+
+# Enables the Force option on upload_to_image. This enables running
+# upload_volume on in-use volumes for backends that support it. (boolean value)
+#enable_force_upload = false
+enable_force_upload = {{ volume.get('enable_force_upload', False)|lower }}
+
+#RPC response timeout recommended by Hitachi
+rpc_response_timeout=3600
+
+#Rabbit
+control_exchange=cinder
+
+
+volume_clear={{ volume.wipe_method }}
+
+
+volume_name_template = volume-%s
+
+#volume_group = vg_cinder_volume
+
+volumes_dir = /var/lib/cinder/volumes
+log_dir=/var/log/cinder
+
+# Use syslog for logging. (boolean value)
+#use_syslog=false
+
+use_syslog=false
+verbose=True
+
+nova_catalog_admin_info = compute:nova:adminURL
+nova_catalog_info = compute:nova:{{ volume.identity.get('endpoint_type', 'publicURL') }}
+
+{%- set rabbit_port = volume.message_queue.get('port', 5671 if volume.message_queue.get('ssl',{}).get('enabled', False) else 5672) %}
+{%- if volume.message_queue.members is defined %}
+transport_url = rabbit://{% for member in volume.message_queue.members -%}
+ {{ volume.message_queue.user }}:{{ volume.message_queue.password }}@{{ member.host }}:{{ member.get('port',rabbit_port) }}
+ {%- if not loop.last -%},{%- endif -%}
+ {%- endfor -%}
+ /{{ volume.message_queue.virtual_host }}
+{%- else %}
+transport_url = rabbit://{{ volume.message_queue.user }}:{{ volume.message_queue.password }}@{{ volume.message_queue.host }}:{{ rabbit_port }}/{{ volume.message_queue.virtual_host }}
+{%- endif %}
+
+{%- if volume.backup.engine != None %}
+{%- set backup_backend_fragment = "cinder/files/backup_backend/_" + volume.backup.engine + ".conf" %}
+{%- include backup_backend_fragment %}
+{%- endif %}
+{%- if volume.nas_secure_file_permissions is defined %}
+nas_secure_file_permissions={{ volume.nas_secure_file_permissions }}
+{%- endif %}
+{%- if volume.nas_secure_file_operations is defined %}
+nas_secure_file_operations={{ volume.nas_secure_file_operations }}
+{%- endif %}
+{%- if volume.cinder_internal_tenant_user_id is defined %}
+cinder_internal_tenant_user_id={{ volume.cinder_internal_tenant_user_id }}
+{%- endif %}
+{%- if volume.cinder_internal_tenant_project_id is defined %}
+cinder_internal_tenant_project_id={{ volume.cinder_internal_tenant_project_id }}
+{%- endif %}
+
+[oslo_messaging_notifications]
+{%- if volume.notification is mapping %}
+driver = {{ volume.notification.get('driver', 'messagingv2') }}
+{%- if volume.notification.topics is defined %}
+topics = {{ volume.notification.topics }}
+{%- endif %}
+{%- elif volume.notification %}
+driver = messagingv2
+{%- endif %}
+
+[oslo_concurrency]
+
+lock_path=/var/lock/cinder
+
+[oslo_middleware]
+
+enable_proxy_headers_parsing = True
+
+{%- if volume.message_queue.get('ssl',{}).get('enabled', False) %}
+[oslo_messaging_rabbit]
+rabbit_use_ssl=true
+
+{%- if volume.message_queue.ssl.version is defined %}
+kombu_ssl_version = {{ volume.message_queue.ssl.version }}
+{%- elif salt['grains.get']('pythonversion') > [2,7,8] %}
+kombu_ssl_version = TLSv1_2
+{%- endif %}
+
+kombu_ssl_ca_certs = {{ volume.message_queue.ssl.get('cacert_file', volume.cacert_file) }}
+{%- endif %}
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-cinder
+revocation_cache_time = 10
+auth_type = password
+user_domain_name = {{ volume.identity.get('domain', 'Default') }}
+project_domain_name = {{ volume.identity.get('domain', 'Default') }}
+project_name = {{ volume.identity.tenant }}
+username = {{ volume.identity.user }}
+password = {{ volume.identity.password }}
+auth_uri={{ volume.identity.get('protocol', 'http') }}://{{ volume.identity.host }}:5000
+auth_url={{ volume.identity.get('protocol', 'http') }}://{{ volume.identity.host }}:35357
+{%- if volume.identity.get('protocol', 'http') == 'https' %}
+cafile={{ volume.identity.get('cacert_file', volume.cacert_file) }}
+{%- endif %}
+
+# Temporarily disabled for backward compatibility
+#auth_uri=http://{{ volume.identity.host }}/identity
+#auth_url=http://{{ volume.identity.host }}/identity_v2_admin
+{%- if volume.cache is defined %}
+memcached_servers={%- for member in volume.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+auth_version = v3
+
+{%- if volume.get('barbican', {}).get('enabled', False) %}
+[key_manager]
+api_class = castellan.key_manager.barbican_key_manager.BarbicanKeyManager
+[barbican]
+auth_endpoint = {{ volume.identity.get('protocol', 'http') }}://{{ volume.identity.get('host', 'localhost') }}:{{ volume.identity.get('port', '5000') }}/v3
+{%- if volume.barbican.get('protocol', 'http') == 'https' %}
+cafile={{ volume.barbican.get('cacert_file', volume.cacert_file) }}
+{%- endif %}
+{%- endif %}
+
+[database]
+idle_timeout=3600
+max_pool_size=30
+max_retries=-1
+max_overflow=40
+connection = {{ volume.database.engine }}+pymysql://{{ volume.database.user }}:{{ volume.database.password }}@{{ volume.database.host }}/{{ volume.database.name }}?charset=utf8{%- if volume.database.get('ssl',{}).get('enabled',False) %}&ssl_ca={{ volume.database.ssl.get('cacert_file', volume.cacert_file) }}{% endif %}
+
+{%- if volume.backend is defined %}
+
+{%- for backend_name, backend in volume.get('backend', {}).iteritems() %}
+
+{%- set backend_fragment = "cinder/files/backend/_" + backend.engine + ".conf" %}
+{%- include backend_fragment %}
+
+{%- endfor %}
+
+{%- endif %}
diff --git a/cinder/files/pike/cinder.conf.volume.RedHat b/cinder/files/pike/cinder.conf.volume.RedHat
new file mode 120000
index 0000000..df997ca
--- /dev/null
+++ b/cinder/files/pike/cinder.conf.volume.RedHat
@@ -0,0 +1 @@
+cinder.conf.volume.Debian
\ No newline at end of file
diff --git a/cinder/files/pike/nfs_shares b/cinder/files/pike/nfs_shares
new file mode 100644
index 0000000..9878dca
--- /dev/null
+++ b/cinder/files/pike/nfs_shares
@@ -0,0 +1,3 @@
+{%- for nfs_server in backend.devices %}
+{{ nfs_server }}
+{%- endfor %}
\ No newline at end of file
diff --git a/cinder/volume.sls b/cinder/volume.sls
index dd35bf4..06f13c3 100644
--- a/cinder/volume.sls
+++ b/cinder/volume.sls
@@ -20,8 +20,6 @@
- require_in:
- service: cinder_volume_services
-{%- if not pillar.cinder.get('controller', {}).get('enabled', False) %}
-
{%- if volume.message_queue.get('ssl',{}).get('enabled', False) %}
rabbitmq_ca_cinder_volume:
{%- if volume.message_queue.ssl.cacert is defined %}
@@ -50,6 +48,8 @@
{%- endif %}
{%- endif %}
+{%- if not pillar.cinder.get('controller', {}).get('enabled', False) %}
+
/etc/cinder/cinder.conf:
file.managed:
- source: salt://cinder/files/{{ volume.version }}/cinder.conf.volume.{{ grains.os_family }}
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
index 688643f..4aecca6 100755
--- a/tests/run_tests.sh
+++ b/tests/run_tests.sh
@@ -111,7 +111,7 @@
}
salt_run() {
- [ -e ${VEN_DIR}/bin/activate ] && source ${VENV_DIR}/bin/activate
+ [ -e ${VENV_DIR}/bin/activate ] && source ${VENV_DIR}/bin/activate
salt-call ${SALT_OPTS} $*
}