blob: 99c9a04992e15ca8f2205c9cb238cb1f66c29975 [file] [log] [blame]
{%- from "barbican/map.jinja" import server with context -%}
{%- set connection_x509_ssl_option = '' %}
{%- if server.database.get('x509',{}).get('enabled',False) %}
{%- set connection_x509_ssl_option = '&ssl_ca=' ~ server.database.x509.ca_file ~ '&ssl_cert=' ~ server.database.x509.cert_file ~ '&ssl_key=' ~ server.database.x509.key_file %}
{%- elif server.database.get('ssl',{}).get('enabled',False) %}
{%- set connection_x509_ssl_option = '&ssl_ca=' ~ server.database.ssl.get('cacert_file', server.cacert_file) %}
{%- endif %}
[DEFAULT]
# Show debugging output in logs (sets DEBUG log level output)
#debug = True
# Address to bind the API server
bind_host = {{ server.bind.address }}
# Port to bind the API server to
bind_port = {{ server.bind.port }}
# Host name, for use in HATEOAS-style references
# Note: Typically this would be the load balanced endpoint that clients would use
# communicate back with this service.
# If a deployment wants to derive host from wsgi request instead then make this
# blank. Blank is needed to override default config value which is
# 'http://localhost:9311'.
### host_href = http://localhost:9311
{%- if server.host_href is defined %}
host_href = {{ server.host_href }}
{%- else %}
host_href =
{%- endif %}
# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
# files, see the Python logging module documentation. Note that when logging
# configuration files are used then all logging configuration is set in the
# configuration file and other logging configuration options are ignored (for
# example, logging_context_format_string). (string value)
# Note: This option can be changed without restarting.
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append = <None>
{%- if server.logging.log_appender %}
log_config_append=/etc/barbican/logging.conf
{%- endif %}
# Log to this file. Make sure you do not set the same log
# file for both the API and registry servers!
#log_file = /var/log/barbican/api.log
# Backlog requests when creating socket
backlog = 4096
# TCP_KEEPIDLE value in seconds when creating socket.
# Not supported on OS X.
#tcp_keepidle = 600
# Maximum allowed http request size against the barbican-api
max_allowed_secret_in_bytes = 10000
max_allowed_request_size_in_bytes = 1000000
# SQLAlchemy connection string for the reference implementation
# registry server. Any valid SQLAlchemy connection string is fine.
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
# Uncomment this for local dev, putting db in project directory:
#sql_connection = sqlite:///barbican.sqlite
# Note: For absolute addresses, use '////' slashes after 'sqlite:'
# Uncomment for a more global development environment
sql_connection = {{ server.database.engine }}://{{ server.database.user }}:{{ server.database.password }}@{{ server.database.host }}/{{ server.database.name }}?charset=utf8{{ connection_x509_ssl_option|string }}
db_auto_create = False
# Period in seconds after which SQLAlchemy should reestablish its connection
# to the database.
#
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
# before MySQL can drop the connection.
sql_idle_timeout = {{ server.database.get('sql_idle_timeout', 3600) }}
# Accepts a class imported from the sqlalchemy.pool module, and handles the
# details of building the pool for you. If commented out, SQLAlchemy
# will select based on the database dialect. Other options are QueuePool
# (for SQLAlchemy-managed connections) and NullPool (to disabled SQLAlchemy
# management of connections).
# See http://docs.sqlalchemy.org/en/latest/core/pooling.html for more details.
#sql_pool_class = QueuePool
# Show SQLAlchemy pool-related debugging output in logs (sets DEBUG log level
# output) if specified.
#sql_pool_logging = True
# Size of pool used by SQLAlchemy. This is the largest number of connections
# that will be kept persistently in the pool. Can be set to 0 to indicate no
# size limit. To disable pooling, use a NullPool with sql_pool_class instead.
# Comment out to allow SQLAlchemy to select the default.
#sql_pool_size = 5
# The maximum overflow size of the pool used by SQLAlchemy. When the number of
# checked-out connections reaches the size set in sql_pool_size, additional
# connections will be returned up to this limit. It follows then that the
# total number of simultaneous connections the pool will allow is
# sql_pool_size + sql_pool_max_overflow. Can be set to -1 to indicate no
# overflow limit, so no limit will be placed on the total number of concurrent
# connections. Comment out to allow SQLAlchemy to select the default.
#sql_pool_max_overflow = 10
# Default page size for the 'limit' paging URL parameter.
default_limit_paging = 10
# Maximum page size for the 'limit' paging URL parameter.
max_limit_paging = 100
# Role used to identify an authenticated user as administrator
#admin_role = admin
# Allow unauthenticated users to access the API with read-only
# privileges. This only applies when using ContextMiddleware.
#allow_anonymous_access = False
# Allow access to version 1 of barbican api
#enable_v1_api = True
# Allow access to version 2 of barbican api
#enable_v2_api = True
# ================= SSL Options ===============================
# Certificate file to use when starting API server securely
#cert_file = /path/to/certfile
# Private key file to use when starting API server securely
#key_file = /path/to/keyfile
# CA certificate file to use to verify connecting clients
#ca_file = /path/to/cafile
# ================= Security Options ==========================
# AES key for encrypting store 'location' metadata, including
# -- if used -- Swift or S3 credentials
# Should be set to a random string of length 16, 24 or 32 bytes
#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
# ================= Queue Options - oslo.messaging ==========================
{%- set rabbit_port = server.message_queue.get('port', 5671 if server.message_queue.ssl.enabled else 5672) %}
{%- if server.message_queue.members is defined %}
transport_url = rabbit://{% for member in server.message_queue.members -%}
{{ server.message_queue.user }}:{{ server.message_queue.password }}@{{ member.host }}:{{ member.get('port', rabbit_port) }}
{%- if not loop.last -%},{%- endif -%}
{%- endfor -%}
/{{ server.message_queue.virtual_host }}
{%- else %}
transport_url = rabbit://{{ server.message_queue.user }}:{{ server.message_queue.password }}@{{ server.message_queue.host }}:{{ rabbit_port }}/{{ server.message_queue.virtual_host }}
{%- endif %}
[oslo_messaging_rabbit]
{%- if server.message_queue.get('ssl',{}).get('enabled', False) %}
rabbit_use_ssl=true
{%- if server.message_queue.ssl.version is defined %}
kombu_ssl_version = {{ server.message_queue.ssl.version }}
{%- elif salt['grains.get']('pythonversion') > [2,7,8] %}
kombu_ssl_version = TLSv1_2
{%- endif %}
{%- if server.message_queue.get('x509',{}).get('enabled', False) %}
kombu_ssl_ca_certs = {{ server.message_queue.x509.ca_file }}
kombu_ssl_keyfile = {{ server.message_queue.x509.key_file }}
kombu_ssl_certfile = {{ server.message_queue.x509.cert_file }}
{%- else %}
kombu_ssl_ca_certs = {{ server.message_queue.ssl.get('cacert_file', server.cacert_file) }}
{%- endif %}
{%- endif %}
# Rabbit and HA configuration:
## amqp_durable_queues = True
# rabbit_userid=guest
# rabbit_password=guest
# rabbit_ha_queues = True
# rabbit_port=5672
# For HA, specify queue nodes in cluster, comma delimited:
# For example: rabbit_hosts=192.168.50.8:5672, 192.168.50.9:5672
# rabbit_hosts=localhost:5672
# For HA, specify queue nodes in cluster as 'user@host:5672', comma delimited, ending with '/offset':
# For example: transport_url = rabbit://guest@192.168.50.8:5672,guest@192.168.50.9:5672/
# DO NOT USE THIS, due to '# FIXME(markmc): support multiple hosts' in oslo/messaging/_drivers/amqpdriver.py
# transport_url = rabbit://guest@localhost:5672/
[oslo_messaging_notifications]
# oslo notification driver for sending audit events via audit middleware.
# Meaningful only when middleware is enabled in barbican paste ini file.
# This is oslo config MultiStrOpt so can be defined multiple times in case
# there is need to route audit event to messaging as well as log.
# driver = messagingv2
# driver = log
# ======== OpenStack policy - oslo_policy ===============
[oslo_policy]
# ======== OpenStack policy integration
# JSON file representing policy (string value)
policy_file=/etc/barbican/policy.json
# Rule checked when requested rule is not found (string value)
policy_default_rule=default
# ================= Queue Options - Application ==========================
[queue]
# Enable queuing asynchronous messaging.
# Set false to invoke worker tasks synchronously (i.e. no-queue standalone mode)
enable = {{ server.get('async_queues_enable', False) }}
# Namespace for the queue
namespace = 'barbican'
# Topic for the queue
topic = 'barbican.workers'
# Version for the task API
version = '1.1'
# Server name for RPC service
server_name = 'barbican.queue'
# Number of asynchronous worker processes.
# When greater than 1, then that many additional worker processes are
# created for asynchronous worker functionality.
asynchronous_workers = 1
# ================= Retry/Scheduler Options ==========================
[retry_scheduler]
# Seconds (float) to wait between starting retry scheduler
initial_delay_seconds = 10.0
# Seconds (float) to wait between starting retry scheduler
periodic_interval_max_seconds = 10.0
# ====================== Quota Options ===============================
[quotas]
# For each resource, the default maximum number that can be used for
# a project is set below. This value can be overridden for each
# project through the API. A negative value means no limit. A zero
# value effectively disables the resource.
# default number of secrets allowed per project
quota_secrets = -1
# default number of orders allowed per project
quota_orders = -1
# default number of containers allowed per project
quota_containers = -1
# default number of consumers allowed per project
quota_consumers = -1
# default number of CAs allowed per project
quota_cas = -1
# ================= Keystone Notification Options - Application ===============
[keystone_notifications]
# Keystone notification functionality uses transport related configuration
# from barbican common configuration as defined under
# 'Queue Options - oslo.messaging' comments.
# The HA related configuration is also shared with notification server.
# True enables keystone notification listener functionality.
enable = {{ server.get('ks_notifications_enable', 'False') }}
# The default exchange under which topics are scoped.
# May be overridden by an exchange name specified in the transport_url option.
control_exchange = 'openstack'
# Keystone notification queue topic name.
# This name needs to match one of values mentioned in Keystone deployment's
# 'notification_topics' configuration e.g.
# notification_topics=notifications, barbican_notifications
# Multiple servers may listen on a topic and messages will be dispatched to one
# of the servers in a round-robin fashion. That's why Barbican service should
# have its own dedicated notification queue so that it receives all of Keystone
# notifications.
# Alternatively if the chosen oslo.messaging backend
# supports listener pooling (for example rabbitmq), setting a non-
# default 'pool_name' option should be preferred. (string value)
topic = 'notifications'
# Pool name for notifications listener. Setting this to a distinctive
# value will allow barbican notifications listener to receive its own
# copy of all messages from the topic without without interfering with
# other services listening on the same topic. This feature is
# supported only by some oslo.messaging backends (in particilar by
# rabbitmq) and for those it is preferrable to use it instead of
# separate notification topic for barbican. (string value)
#pool_name = <None>
pool_name = {{ server.get('ks_notifications_pool_name', 'barbican') }}
# True enables requeue feature in case of notification processing error.
# Enable this only when underlying transport supports this feature.
allow_requeue = {{ server.get('ks_notifications_allow_requeue', 'False') }}
# Version of tasks invoked via notifications
version = '1.0'
# Define the number of max threads to be used for notification server
# processing functionality.
thread_pool_size = 10
# ================= Secret Store Plugin ===================
[secretstore]
namespace = barbican.secretstore.plugin
#enabled_secretstore_plugins = store_crypto
enable_multiple_secret_stores = True
stores_lookup_suffix = {{ server.get('store', {}).keys() | join(', ') }}
# ================= Crypto plugin ===================
[crypto]
namespace = barbican.crypto.plugin
#enabled_crypto_plugins = simple_crypto
{% for store_name, store in server.get('store', {}).iteritems() %}
[secretstore:{{ store_name }}]
{%- if store.store_plugin is defined %}
secret_store_plugin = {{ store.store_plugin }}
{%- endif %}
{%- if store.crypto_plugin is defined %}
crypto_plugin = {{ store.crypto_plugin }}
{%- endif %}
{%- if store.global_default is defined %}
global_default = {{ store.global_default }}
{%- endif %}
{% endfor %}
{% for plugin_name, plugin in server.get('plugin', {}).iteritems() %}
{%- set plugin_fragment = "barbican/files/" + server.version + "/plugin/_" + plugin_name +".conf" %}
{%- include plugin_fragment %}
{% endfor %}
# [simple_crypto_plugin]
# # the kek should be a 32-byte value which is base64 encoded
# kek = 'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY='
# # User friendly plugin name
# # plugin_name = 'Software Only Crypto'
# [dogtag_plugin]
# pem_path = '/etc/barbican/kra_admin_cert.pem'
# dogtag_host = localhost
# dogtag_port = 8443
# nss_db_path = '/etc/barbican/alias'
# nss_db_path_ca = '/etc/barbican/alias-ca'
# nss_password = 'password123'
# simple_cmc_profile = 'caOtherCert'
# ca_expiration_time = 1
# plugin_working_dir = '/etc/barbican/dogtag'
# # User friendly plugin name
# # plugin_name = 'Dogtag KRA'
# [p11_crypto_plugin]
# # Path to vendor PKCS11 library
# library_path = '/usr/lib/libCryptoki2_64.so'
# # Password to login to PKCS11 session
# login = 'mypassword'
# # Label to identify master KEK in the HSM (must not be the same as HMAC label)
# mkek_label = 'an_mkek'
# # Length in bytes of master KEK
# mkek_length = 32
# # Label to identify HMAC key in the HSM (must not be the same as MKEK label)
# hmac_label = 'my_hmac_label'
# # HSM Slot id (Should correspond to a configured PKCS11 slot). Default: 1
# # slot_id = 1
# # Enable Read/Write session with the HSM?
# # rw_session = True
# # Length of Project KEKs to create
# # pkek_length = 32
# # How long to cache unwrapped Project KEKs
# # pkek_cache_ttl = 900
# # Max number of items in pkek cache
# # pkek_cache_limit = 100
# # User friendly plugin name
# # plugin_name = 'PKCS11 HSM'
# # ================== KMIP plugin =====================
# [kmip_plugin]
# username = 'admin'
# password = 'password'
# host = localhost
# port = 5696
# keyfile = '/path/to/certs/cert.key'
# certfile = '/path/to/certs/cert.crt'
# ca_certs = '/path/to/certs/LocalCA.crt'
# # User friendly plugin name
# # plugin_name = 'KMIP HSM'
# # ================= Certificate plugin ===================
# # DEPRECATION WARNING: The Certificates Plugin has been deprecated
# # and will be removed in the P release.
# [certificate]
# namespace = barbican.certificate.plugin
# enabled_certificate_plugins = simple_certificate
# enabled_certificate_plugins = snakeoil_ca
# [certificate_event]
# namespace = barbican.certificate.event.plugin
# enabled_certificate_event_plugins = simple_certificate_event
[snakeoil_ca_plugin]
ca_cert_path = /etc/barbican/snakeoil-ca.crt
ca_cert_key_path = /etc/barbican/snakeoil-ca.key
ca_cert_chain_path = /etc/barbican/snakeoil-ca.chain
ca_cert_pkcs7_path = /etc/barbican/snakeoil-ca.p7b
subca_cert_key_directory=/etc/barbican/snakeoil-cas
# ========================================================
[cors]
#
# From oslo.middleware.cors
#
# Indicate whether this resource may be shared with the domain
# received in the requests "origin" header. (list value)
#allowed_origin = <None>
# Indicate that the actual request can include user credentials
# (boolean value)
#allow_credentials = true
# Indicate which headers are safe to expose to the API. Defaults to
# HTTP Simple Headers. (list value)
#expose_headers = X-Auth-Token, X-Openstack-Request-Id, X-Project-Id, X-Identity-Status, X-User-Id, X-Storage-Token, X-Domain-Id, X-User-Domain-Id, X-Project-Domain-Id, X-Roles
# Maximum cache age of CORS preflight requests. (integer value)
#max_age = 3600
# Indicate which methods can be used during the actual request. (list
# value)
#allow_methods = GET,PUT,POST,DELETE,PATCH
# Indicate which header field names may be used during the actual
# request. (list value)
#allow_headers = X-Auth-Token, X-Openstack-Request-Id, X-Project-Id, X-Identity-Status, X-User-Id, X-Storage-Token, X-Domain-Id, X-User-Domain-Id, X-Project-Domain-Id, X-Roles
[cors.subdomain]
#
# From oslo.middleware.cors
#
# Indicate whether this resource may be shared with the domain
# received in the requests "origin" header. (list value)
#allowed_origin = <None>
# Indicate that the actual request can include user credentials
# (boolean value)
#allow_credentials = true
# Indicate which headers are safe to expose to the API. Defaults to
# HTTP Simple Headers. (list value)
#expose_headers = X-Auth-Token, X-Openstack-Request-Id, X-Project-Id, X-Identity-Status, X-User-Id, X-Storage-Token, X-Domain-Id, X-User-Domain-Id, X-Project-Domain-Id, X-Roles
# Maximum cache age of CORS preflight requests. (integer value)
#max_age = 3600
# Indicate which methods can be used during the actual request. (list
# value)
#allow_methods = GET,PUT,POST,DELETE,PATCH
# Indicate which header field names may be used during the actual
# request. (list value)
#allow_headers = X-Auth-Token, X-Openstack-Request-Id, X-Project-Id, X-Identity-Status, X-User-Id, X-Storage-Token, X-Domain-Id, X-User-Domain-Id, X-Project-Domain-Id, X-Roles
[oslo_middleware]
#
# From oslo.middleware.http_proxy_to_wsgi
#
# Wether the application is behind a proxy or not. This determines if
# the middleware should parse the headers or not. (boolean value)
enable_proxy_headers_parsing = {{ server.get('is_proxied', False) }}
[keystone_authtoken]
#
# From keystonemiddleware.auth_token
#
auth_type = password
user_domain_id = {{ server.identity.get('domain', 'default') }}
project_domain_id = {{ server.identity.get('domain', 'default') }}
project_name = {{ server.identity.tenant }}
username = {{ server.identity.user }}
password = {{ server.identity.password }}
auth_uri = {{ server.identity.get('protocol', 'http') }}://{{ server.identity.host }}:5000
auth_url = {{ server.identity.get('protocol', 'http') }}://{{ server.identity.host }}:35357
interface = {{ server.identity.get('endpoint_type', 'internal') }}
{%- if server.get('cache', {}).members is defined %}
memcached_servers = {%- for member in server.cache.members %}{{ member.host }}:{{ member.get('port', '11211') }}{% if not loop.last %},{% endif %}{%- endfor %}
{%- if server.cache.get('security', {}).get('enabled', False) %}
memcache_security_strategy = {{ server.cache.security.get('strategy', 'ENCRYPT') }}
{%- if server.cache.security.secret_key is not defined or not server.cache.security.secret_key %}
{%- do salt.test.exception('barbican.server.cache.security.secret_key is not defined: Please add secret_key') %}
{%- else %}
memcache_secret_key = {{ server.cache.security.secret_key }}
{%- endif %}
{%- endif %}
{%- else %}
token_cache_time = -1
{%- endif %}
region_name = {{ server.get('region', 'RegionOne') }}
{%- if server.identity.get('protocol', 'http') == 'https' %}
cafile = {{ server.identity.get('cacert_file', server.cacert_file) }}
{%- endif %}
# Complete "public" Identity API endpoint. This endpoint should not be an
# "admin" endpoint, as it should be accessible by all end users. Unauthenticated
# clients are redirected to this endpoint to authenticate. Although this
# endpoint should ideally be unversioned, client support in the wild varies.
# If you're using a versioned v2 endpoint here, then this should *not* be the
# same endpoint the service user utilizes for validating tokens, because normal
# end users may not be able to reach that endpoint. (string value)
#auth_uri = <None>
# API version of the admin Identity API endpoint. (string value)
#auth_version = <None>
# Do not handle authorization requests within the middleware, but delegate the
# authorization decision to downstream WSGI components. (boolean value)
#delay_auth_decision = false
# Request timeout value for communicating with Identity API server. (integer
# value)
#http_connect_timeout = <None>
# How many times are we trying to reconnect when communicating with Identity API
# Server. (integer value)
#http_request_max_retries = 3
# Request environment key where the Swift cache object is stored. When
# auth_token middleware is deployed with a Swift cache, use this option to have
# the middleware share a caching backend with swift. Otherwise, use the
# ``memcached_servers`` option instead. (string value)
#cache = <None>
# Required if identity server requires client certificate (string value)
#certfile = <None>
# Required if identity server requires client certificate (string value)
#keyfile = <None>
# A PEM encoded Certificate Authority to use when verifying HTTPs connections.
# Defaults to system CAs. (string value)
#cafile = <None>
# Verify HTTPS connections. (boolean value)
#insecure = false
# The region in which the identity server can be found. (string value)
#region_name = <None>
# Directory used to cache files related to PKI tokens. (string value)
#signing_dir = <None>
# Optionally specify a list of memcached server(s) to use for caching. If left
# undefined, tokens will instead be cached in-process. (list value)
# Deprecated group/name - [keystone_authtoken]/memcache_servers
#memcached_servers = <None>
# In order to prevent excessive effort spent validating tokens, the middleware
# caches previously-seen tokens for a configurable duration (in seconds). Set to
# -1 to disable caching completely. (integer value)
#token_cache_time = 300
# Determines the frequency at which the list of revoked tokens is retrieved from
# the Identity service (in seconds). A high number of revocation events combined
# with a low cache duration may significantly reduce performance. Only valid for
# PKI tokens. (integer value)
#revocation_cache_time = 10
# (Optional) If defined, indicate whether token data should be authenticated or
# authenticated and encrypted. If MAC, token data is authenticated (with HMAC)
# in the cache. If ENCRYPT, token data is encrypted and authenticated in the
# cache. If the value is not one of these options or empty, auth_token will
# raise an exception on initialization. (string value)
# Allowed values: None, MAC, ENCRYPT
#memcache_security_strategy = None
# (Optional, mandatory if memcache_security_strategy is defined) This string is
# used for key derivation. (string value)
#memcache_secret_key = <None>
# (Optional) Number of seconds memcached server is considered dead before it is
# tried again. (integer value)
#memcache_pool_dead_retry = 300
# (Optional) Maximum total number of open connections to every memcached server.
# (integer value)
#memcache_pool_maxsize = 10
# (Optional) Socket timeout in seconds for communicating with a memcached
# server. (integer value)
#memcache_pool_socket_timeout = 3
# (Optional) Number of seconds a connection to memcached is held unused in the
# pool before it is closed. (integer value)
#memcache_pool_unused_timeout = 60
# (Optional) Number of seconds that an operation will wait to get a memcached
# client connection from the pool. (integer value)
#memcache_pool_conn_get_timeout = 10
# (Optional) Use the advanced (eventlet safe) memcached client pool. The
# advanced pool will only work under python 2.x. (boolean value)
#memcache_use_advanced_pool = false
# (Optional) Indicate whether to set the X-Service-Catalog header. If False,
# middleware will not ask for service catalog on token validation and will not
# set the X-Service-Catalog header. (boolean value)
#include_service_catalog = true
# Used to control the use and type of token binding. Can be set to: "disabled"
# to not check token binding. "permissive" (default) to validate binding
# information if the bind type is of a form known to the server and ignore it if
# not. "strict" like "permissive" but if the bind type is unknown the token will
# be rejected. "required" any form of token binding is needed to be allowed.
# Finally the name of a binding method that must be present in tokens. (string
# value)
#enforce_token_bind = permissive
# If true, the revocation list will be checked for cached tokens. This requires
# that PKI tokens are configured on the identity server. (boolean value)
#check_revocations_for_cached = false
# Hash algorithms to use for hashing PKI tokens. This may be a single algorithm
# or multiple. The algorithms are those supported by Python standard
# hashlib.new(). The hashes will be tried in the order given, so put the
# preferred one first for performance. The result of the first hash will be
# stored in the cache. This will typically be set to multiple values only while
# migrating from a less secure algorithm to a more secure one. Once all the old
# tokens are expired this option should be set to a single value for better
# performance. (list value)
#hash_algorithms = md5
# Authentication type to load (string value)
# Deprecated group/name - [keystone_authtoken]/auth_plugin
#auth_type = <None>
# Config Section from which to load plugin specific options (string value)
#auth_section = <None>
{%- if server.configmap is defined %}
{%- set _data = server.configmap %}
{%- include "oslo_templates/files/configmap/configmap.conf" %}
{%- endif %}