Add Oslo config templates for Salt formulas to work with Queens
* Adds config section templates for the oslo.* libraries and keystoneauth/keystonemiddleware.
* Adds rendering tests.
* Small cleanup after initial commit.
Change-Id: I8a1e37d132e6befd66d03da1809a95359f836365
diff --git a/LICENSE b/LICENSE
index 952df70..18b4002 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2018 OSCore Team
+Copyright (c) 2018 Mirantis Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -10,4 +10,4 @@
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
-limitations under the License.
\ No newline at end of file
+limitations under the License.
diff --git a/README.rst b/README.rst
index ef39d43..7b13b0b 100644
--- a/README.rst
+++ b/README.rst
@@ -1,21 +1,9 @@
-==================================
+======================
Oslo_templates Formula
-==================================
+======================
-Service oslo-templates description
-
-
-Sample Metadata
-===============
-
-Single oslo_templates service
-
-.. code-block:: yaml
-
- oslo_templates:
- template:
- enabled: true
+Contains common config templates used by different OpenStack services.
References
@@ -33,26 +21,3 @@
available online at:
http://salt-formulas.readthedocs.io/
-
-In the unfortunate event that bugs are discovered, they should be reported to
-the appropriate issue tracker. Use GitHub issue tracker for specific salt
-formula:
-
- https://github.com/salt-formulas/salt-formula-oslo-templates/issues
-
-For feature requests, bug reports or blueprints affecting entire ecosystem,
-use Launchpad salt-formulas project:
-
- https://launchpad.net/salt-formulas
-
-Developers wishing to work on the salt-formulas projects should always base
-their work on master branch and submit pull request against specific formula.
-
-You should also subscribe to mailing list (salt-formulas@freelists.org):
-
- https://www.freelists.org/list/salt-formulas
-
-Any questions or feedback is always welcome so feel free to join our IRC
-channel:
-
- #salt-formulas @ irc.freenode.net
diff --git a/debian/copyright b/debian/copyright
index 02b1808..b5a7d64 100644
--- a/debian/copyright
+++ b/debian/copyright
@@ -4,9 +4,9 @@
Source: https://github.com/salt-formulas/salt-formula-oslo-templates
Files: *
-Copyright: 2018 OSCore Team
+Copyright: 2018 Mirantis Inc
License: Apache-2.0
- Copyright (C) 2018 OSCore Team
+ Copyright (C) 2018 Mirantis Inc
.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 70ff525..510e8de 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -35,8 +35,8 @@
master_doc = 'index'
# General information about the project.
-project = u'salt-formula-oslo_templates'
-copyright = u'2016, OSCore Team'
+project = u'salt-formula-oslo-templates'
+copyright = u'2018, Mirantis Inc'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
diff --git a/metadata/service/oslo/_auth_type_password.conf b/metadata/service/oslo/_auth_type_password.conf
deleted file mode 100644
index 1ad0414..0000000
--- a/metadata/service/oslo/_auth_type_password.conf
+++ /dev/null
@@ -1,77 +0,0 @@
-{%- if _service_catalog.get('engine') == 'keystone' %}
-www_authenticate_uri = {{ _service_catalog.get('protocol', 'http') }}://{{ _service_catalog.host }}:{{ _service_catalog.port }}
-auth_url = {{ _service_catalog.get('protocol', 'http') }}://{{ _service_catalog.host }}:{{ _service_catalog.port }}/identity
-
-{%- if _service_catalog.get('protocol', 'http') == 'https' %}
-cafile={{ _service_catalog.get('cacert_file', cfg.cacert_file) }}
-{%- endif %}
-
-{%- if _service_catalog.get('default_domain_id') %}
-default_domain_id = {{ _service_catalog.default_domain_id }}
-{%- endif %}
-
-{%- if _service_catalog.get('default_domain_name') %}
-default_domain_name = {{ _service_catalog.default_domain_name }}
-{%- endif %}
-
-{%- if _service_catalog.get('domain_id') %}
-domain_id = {{ _service_catalog.domain_id }}
-{%- endif %}
-
-{%- if _service_catalog.get('domain_name') %}
-domain_name = {{ _service_catalog.domain_name }}
-{%- endif %}
-
-{%- if _service_catalog.get('password') %}
-password = {{ _service_catalog.password }}
-{%- endif %}
-
-{%- if _service_catalog.get('project_domain_id') %}
-project_domain_id = {{ _service_catalog.project_domain_id }}
-{%- endif %}
-
-{%- if _service_catalog.get('project_domain_name') %}
-project_domain_name = {{ _service_catalog.project_domain_name }}
-{%- endif %}
-
-{%- if _service_catalog.get('project_id') %}
-project_id = {{ _service_catalog.project_id }}
-{%- endif %}
-
-{%- if _service_catalog.get('project_name') %}
-project_name = {{ _service_catalog.project_name }}
-{%- endif %}
-
-{%- if _service_catalog.get('tenant_id') %}
-tenant_id = {{ _service_catalog.tenant_id }}
-{%- endif %}
-
-{%- if _service_catalog.get('tenant_name') %}
-tenant_name = {{ _service_catalog.tenant_name }}
-{%- endif %}
-
-{%- if _service_catalog.get('user_domain_id') %}
-user_domain_id = {{ _service_catalog.user_domain_id }}
-{%- endif %}
-
-{%- if _service_catalog.get('user_domain_name') %}
-user_domain_name = {{ _service_catalog.user_domain_name }}
-{%- endif %}
-
-{%- if _service_catalog.get('user_id') %}
-user_id = {{ _service_catalog.user_id }}
-{%- endif %}
-
-{%- if _service_catalog.get('user') %}
-username = {{ _service_catalog.user }}
-{%- endif %}
-
-{%- if _service_catalog.get('version') %}
-auth_version = {{ _service_catalog.version }}
-{%- endif %}
-
-{%- if _service_catalog.get('memcached_servers') %}
-memcached_servers = {{ _service_catalog.memcached_servers }}
-{%- endif %}
-
-{%- endif %}
diff --git a/oslo_templates/files/oslo_templates.conf b/oslo_templates/files/oslo_templates.conf
deleted file mode 100644
index 44e3449..0000000
--- a/oslo_templates/files/oslo_templates.conf
+++ /dev/null
@@ -1 +0,0 @@
-# Service config file
\ No newline at end of file
diff --git a/oslo_templates/files/queens/keystoneauth/_type_password.conf b/oslo_templates/files/queens/keystoneauth/_type_password.conf
new file mode 100644
index 0000000..8b84cc6
--- /dev/null
+++ b/oslo_templates/files/queens/keystoneauth/_type_password.conf
@@ -0,0 +1,179 @@
+
+#
+# From keystonemiddleware.auth_token
+#
+
+# Complete "public" Identity API endpoint. This endpoint should not be an
+# "admin" endpoint, as it should be accessible by all end users. Unauthenticated
+# clients are redirected to this endpoint to authenticate. Although this
+# endpoint should ideally be unversioned, client support in the wild varies. If
+# you're using a versioned v2 endpoint here, then this should *not* be the same
+# endpoint the service user utilizes for validating tokens, because normal end
+# users may not be able to reach that endpoint. (string value)
+# Deprecated group/name - [keystone_authtoken]/auth_uri
+www_authenticate_uri = {{ _data.get('protocol', 'http') }}://{{ _data.host }}:{{ _data.port }}
+
+# API version of the admin Identity API endpoint. (string value)
+#auth_version = <None>
+{%- if _data.get('version') %}
+auth_version = {{ _data.version }}
+{%- endif %}
+
+# Required if identity server requires client certificate (string value)
+#certfile = <None>
+
+# Required if identity server requires client certificate (string value)
+#keyfile = <None>
+
+# A PEM encoded Certificate Authority to use when verifying HTTPs connections.
+# Defaults to system CAs. (string value)
+#cafile = <None>
+{%- if _data.get('protocol', 'http') == 'https' %}
+cafile={{ _data.get('cacert_file', cfg.cacert_file) }}
+{%- endif %}
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Timeout value for http requests (integer value)
+#timeout = <None>
+
+# Authentication type to load (string value)
+# Deprecated group/name - [keystone_authtoken]/auth_plugin
+#auth_type = <None>
+{%- if _data.get('auth_type', 'password') %}
+auth_type = {{ _data.get('auth_type', 'password') }}
+{%- endif %}
+
+# Config Section from which to load plugin specific options (string value)
+#auth_section = <None>
+
+
+# Authentication URL (string value)
+auth_url = {{ _data.get('protocol', 'http') }}://{{ _data.host }}:{{ _data.port }}/identity
+
+# Scope for system operations (string value)
+#system_scope = <None>
+
+# Domain ID to scope to (string value)
+#domain_id = <None>
+{%- if _data.get('domain_id') %}
+domain_id = {{ _data.domain_id }}
+{%- endif %}
+
+# Domain name to scope to (string value)
+#domain_name = <None>
+{%- if _data.get('domain_name') %}
+domain_name = {{ _data.domain_name }}
+{%- endif %}
+
+# Project ID to scope to (string value)
+#project_id = <None>
+{%- if _data.get('project_id') %}
+project_id = {{ _data.project_id }}
+{%- endif %}
+
+# Project name to scope to (string value)
+#project_name = <None>
+{%- if _data.get('project_name') %}
+project_name = {{ _data.project_name }}
+{%- endif %}
+
+# Domain ID containing project (string value)
+#project_domain_id = <None>
+{%- if _data.get('project_domain_id','default') %}
+project_domain_id = {{ _data.get('project_domain_id','default') }}
+{%- endif %}
+
+# Domain name containing project (string value)
+#project_domain_name = <None>
+{%- if _data.get('project_domain_name') %}
+project_domain_name = {{ _data.project_domain_name }}
+{%- endif %}
+
+# Trust ID (string value)
+#trust_id = <None>
+
+# Optional domain ID to use with v3 and v2 parameters. It will be used for both
+# the user and project domain in v3 and ignored in v2 authentication. (string
+# value)
+{%- if _data.get('default_domain_id') %}
+default_domain_id = {{ _data.default_domain_id }}
+{%- else %}
+#default_domain_id = <None>
+{%- endif %}
+
+# Optional domain name to use with v3 API and v2 parameters. It will be used for
+# both the user and project domain in v3 and ignored in v2 authentication.
+# (string value)
+#default_domain_name = <None>
+{%- if _data.get('default_domain_name') %}
+default_domain_name = {{ _data.default_domain_name }}
+{%- endif %}
+
+# User ID (string value)
+#user_id = <None>
+{%- if _data.get('user_id') %}
+user_id = {{ _data.user_id }}
+{%- endif %}
+
+# Username (string value)
+# Deprecated group/name - [neutron]/user_name
+#username = <None>
+{%- if _data.get('user') %}
+username = {{ _data.user }}
+{%- endif %}
+
+# User's domain id (string value)
+#user_domain_id = <None>
+{%- if _data.get('user_domain_id','default') %}
+user_domain_id = {{ _data.get('user_domain_id','default') }}
+{%- endif %}
+
+# User's domain name (string value)
+#user_domain_name = <None>
+{%- if _data.get('user_domain_name') %}
+user_domain_name = {{ _data.user_domain_name }}
+{%- endif %}
+
+# User's password (string value)
+#password = <None>
+{%- if _data.get('password') %}
+password = {{ _data.password }}
+{%- endif %}
+
+# Tenant ID (string value)
+#tenant_id = <None>
+{%- if _data.get('tenant_id') %}
+tenant_id = {{ _data.tenant_id }}
+{%- endif %}
+
+# Tenant Name (string value)
+#tenant_name = <None>
+{%- if _data.get('tenant_name') %}
+tenant_name = {{ _data.tenant_name }}
+{%- endif %}
+
+# The default service_type for endpoint URL discovery. (string value)
+#service_type = network
+
+# The default service_name for endpoint URL discovery. (string value)
+#service_name = <None>
+
+# List of interfaces, in order of preference, for endpoint URL. (list value)
+#valid_interfaces = internal,public
+
+# The default region_name for endpoint URL discovery. (string value)
+#region_name = <None>
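
The rendering tests mentioned in the commit message are not shown in this excerpt. Purely as an illustration of how the template above can be exercised, the sketch below renders it with plain Jinja2, pytest-style assertions and a made-up `_data` mapping; the actual tests in this change may look different.

    # Hypothetical rendering check, not taken from this change; run from the formula root.
    import jinja2

    def render(path, **context):
        env = jinja2.Environment(
            loader=jinja2.FileSystemLoader('oslo_templates/files/queens'))
        return env.get_template(path).render(**context)

    def test_type_password_renders_identity_urls():
        out = render('keystoneauth/_type_password.conf',
                     _data={'host': '10.0.0.10', 'port': 5000,
                            'user': 'nova', 'password': 'secret'})
        assert 'www_authenticate_uri = http://10.0.0.10:5000' in out
        assert 'auth_url = http://10.0.0.10:5000/identity' in out
        assert 'auth_type = password' in out
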
diff --git a/oslo_templates/files/queens/oslo/_cache.conf b/oslo_templates/files/queens/oslo/_cache.conf
new file mode 100644
index 0000000..6ae46ad
--- /dev/null
+++ b/oslo_templates/files/queens/oslo/_cache.conf
@@ -0,0 +1,90 @@
+
+#
+# From oslo.cache
+#
+
+# Prefix for building the configuration dictionary for the cache
+# region. This should not need to be changed unless there is another
+# dogpile.cache region with the same configuration name. (string
+# value)
+#config_prefix = cache.oslo
+
+# Default TTL, in seconds, for any cached item in the dogpile.cache
+# region. This applies to any cached method that doesn't have an
+# explicit cache expiration time defined for it. (integer value)
+#expiration_time = 600
+
+# Cache backend module. For eventlet-based or environments with
+# hundreds of threaded servers, Memcache with pooling
+# (oslo_cache.memcache_pool) is recommended. For environments with
+# less than 100 threaded servers, Memcached (dogpile.cache.memcached)
+# or Redis (dogpile.cache.redis) is recommended. Test environments
+# with a single instance of the server can use the
+# dogpile.cache.memory backend. (string value)
+# Possible values:
+# oslo_cache.memcache_pool - <No description provided>
+# oslo_cache.dict - <No description provided>
+# oslo_cache.mongo - <No description provided>
+# oslo_cache.etcd3gw - <No description provided>
+# dogpile.cache.memcached - <No description provided>
+# dogpile.cache.pylibmc - <No description provided>
+# dogpile.cache.bmemcached - <No description provided>
+# dogpile.cache.dbm - <No description provided>
+# dogpile.cache.redis - <No description provided>
+# dogpile.cache.memory - <No description provided>
+# dogpile.cache.memory_pickle - <No description provided>
+# dogpile.cache.null - <No description provided>
+#backend = dogpile.cache.null
+
+# Arguments supplied to the backend module. Specify this option once
+# per argument to be passed to the dogpile.cache backend. Example
+# format: "<argname>:<value>". (multi valued)
+#backend_argument =
+
+# Proxy classes to import that will affect the way the dogpile.cache
+# backend functions. See the dogpile.cache documentation on changing-
+# backend-behavior. (list value)
+#proxies =
+
+# Global toggle for caching. (boolean value)
+#enabled = false
+
+# Extra debugging from the cache backend (cache keys,
+# get/set/delete/etc calls). This is only really useful if you need to
+# see the specific cache-backend get/set/delete calls with the
+# keys/values. Typically this should be left set to false. (boolean
+# value)
+#debug_cache_backend = false
+
+# Memcache servers in the format of "host:port".
+# (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
+# (list value)
+#memcache_servers = localhost:11211
+{%- if _data.servers is defined %}
+memcache_servers = {% for member,data in _data.servers.items() %}{% if data.get('enabled', False) %}{{ member }}:{{ data.port }}{% if not loop.last %},{% endif %}{% endif %}{%- endfor %}
+{%- elif _data.members is defined %}
+memcache_servers = {%- for member in _data.members %}{{ member.host }}:{{ member.port }}{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+# Number of seconds memcached server is considered dead before it is
+# tried again. (dogpile.cache.memcache and oslo_cache.memcache_pool
+# backends only). (integer value)
+#memcache_dead_retry = 300
+
+# Timeout in seconds for every call to a server.
+# (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
+# (integer value)
+#memcache_socket_timeout = 3
+
+# Max total number of open connections to every memcached server.
+# (oslo_cache.memcache_pool backend only). (integer value)
+#memcache_pool_maxsize = 10
+
+# Number of seconds a connection to memcached is held unused in the
+# pool before it is closed. (oslo_cache.memcache_pool backend only).
+# (integer value)
+#memcache_pool_unused_timeout = 60
+
+# Number of seconds that an operation will wait to get a memcache
+# client connection. (integer value)
+#memcache_pool_connection_get_timeout = 10
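
The memcache_servers block above is the only templated part of this file: it accepts either a `servers` mapping (with per-member enabled flags) or a `members` list. A rough sketch of what the `members` form produces, rendered with plain Jinja2 and made-up hosts rather than through Salt:

    # Illustrative only; the formula itself renders this through Salt's Jinja renderer.
    import jinja2

    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader('oslo_templates/files/queens'))
    out = env.get_template('oslo/_cache.conf').render(
        _data={'members': [{'host': '10.0.0.11', 'port': 11211},
                           {'host': '10.0.0.12', 'port': 11211}]})
    # The '{%- for' tag strips the blank after '=', so match the pieces loosely.
    assert 'memcache_servers =' in out
    assert '10.0.0.11:11211,10.0.0.12:11211' in out
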
diff --git a/oslo_templates/files/queens/oslo/_concurrency.conf b/oslo_templates/files/queens/oslo/_concurrency.conf
new file mode 100644
index 0000000..856ae0d
--- /dev/null
+++ b/oslo_templates/files/queens/oslo/_concurrency.conf
@@ -0,0 +1,18 @@
+
+#
+# From oslo.concurrency
+#
+
+# Enables or disables inter-process locks. (boolean value)
+#disable_process_locking = false
+{%- if _data.disable_process_locking is defined %}
+disable_process_locking = {{ _data.disable_process_locking }}
+{%- endif %}
+
+# Directory to use for lock files. For security, the specified
+# directory should only be writable by the user running the processes
+# that need locking. Defaults to environment variable OSLO_LOCK_PATH.
+# If OSLO_LOCK_PATH is not set in the environment, use the Python
+# tempfile.gettempdir function to find a suitable location. If
+# external locks are used, a lock path must be set. (string value)
+#lock_path = /tmp
diff --git a/oslo_templates/files/queens/oslo/_cors.conf b/oslo_templates/files/queens/oslo/_cors.conf
new file mode 100644
index 0000000..c0c3f37
--- /dev/null
+++ b/oslo_templates/files/queens/oslo/_cors.conf
@@ -0,0 +1,42 @@
+
+#
+# From oslo.middleware
+#
+
+# Indicate whether this resource may be shared with the domain
+# received in the requests "origin" header. Format:
+# "<protocol>://<host>[:<port>]", no trailing slash. Example:
+# https://horizon.example.com (list value)
+{%- if _data.allowed_origin is defined %}
+allowed_origin = {{ _data.allowed_origin }}
+{%- endif %}
+
+# Indicate that the actual request can include user credentials
+# (boolean value)
+{%- if _data.allow_credentials is defined %}
+allow_credentials = {{ _data.allow_credentials }}
+{%- endif %}
+
+# Indicate which headers are safe to expose to the API. Defaults to
+# HTTP Simple Headers. (list value)
+{%- if _data.expose_headers is defined %}
+expose_headers = {{ _data.expose_headers }}
+{%- endif %}
+
+# Maximum cache age of CORS preflight requests. (integer value)
+{%- if _data.max_age is defined %}
+max_age = {{ _data.max_age }}
+{%- endif %}
+
+# Indicate which methods can be used during the actual request. (list
+# value)
+{%- if _data.allow_methods is defined %}
+allow_methods = {{ _data.allow_methods }}
+{%- endif %}
+
+# Indicate which header field names may be used during the actual
+# request. (list value)
+{%- if _data.allow_headers is defined %}
+allow_headers = {{ _data.allow_headers }}
+{%- endif %}
+
diff --git a/oslo_templates/files/queens/oslo/_database.conf b/oslo_templates/files/queens/oslo/_database.conf
new file mode 100644
index 0000000..1519a50
--- /dev/null
+++ b/oslo_templates/files/queens/oslo/_database.conf
@@ -0,0 +1,130 @@
+
+#
+# From oslo.db
+#
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database.
+# (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+connection = {{ _data.engine }}+pymysql://{{ _data.user }}:{{ _data.password }}@{{ _data.host }}/{{ _data.name }}?charset=utf8{%- if _data.get('ssl',{}).get('enabled',False) %}&ssl_ca={{ _data.ssl.cacert_file}}{% endif %}
+
+# The SQLAlchemy connection string to use to connect to the slave
+# database. (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including
+# the default, overrides any server-set SQL mode. To use whatever SQL
+# mode is set by the server configuration, set this to no value.
+# Example: mysql_sql_mode= (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# If True, transparently enables support for handling MySQL Cluster
+# (NDB). (boolean value)
+#mysql_enable_ndb = false
+
+# Connections which have been present in the connection pool longer
+# than this number of seconds will be replaced with a new one the next
+# time they are checked out from the pool. (integer value)
+# Deprecated group/name - [DATABASE]/idle_timeout
+# Deprecated group/name - [database]/idle_timeout
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#connection_recycle_time = 3600
+{%- if _data.connection_recycle_time is defined %}
+connection_recycle_time = {{ _data.connection_recycle_time }}
+{%- endif %}
+
+# Minimum number of SQL connections to keep open in a pool. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool. Setting a
+# value of 0 indicates no limit. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = 5
+{%- if _data.get('max_pool_size', 10) %}
+max_pool_size = {{ _data.get('max_pool_size', 10) }}
+{%- endif %}
+
+# Maximum number of database connection retries during startup. Set to
+# -1 to specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+{%- if _data.get('max_retries', -1) %}
+max_retries = {{ _data.get('max_retries', -1) }}
+{%- endif %}
+
+# Interval between retries of opening a SQL connection. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+{%- if _data.get('max_overflow', 30) %}
+max_overflow = {{ _data.get('max_overflow', 30) }}
+{%- endif %}
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything.
+# (integer value)
+# Minimum value: 0
+# Maximum value: 100
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer
+# value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on connection
+# lost. (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database
+# operation up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries
+# of a database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before
+# error is raised. Set to -1 to specify an infinite retry count.
+# (integer value)
+#db_max_retries = 20
+
+#
+# From oslo.db.concurrency
+#
+
+# Enable the experimental use of thread pooling for all DB API calls
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/dbapi_use_tpool
+#use_tpool = false
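
The connection option is the only unconditionally templated line in this file; the remaining pool settings fall back to the opinionated defaults baked into the Jinja (max_pool_size 10, max_retries -1, max_overflow 30). A sketch of the resulting SQLAlchemy URL, again with plain Jinja2 and an assumed pillar-like mapping:

    # Illustrative only.
    import jinja2

    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader('oslo_templates/files/queens'))
    out = env.get_template('oslo/_database.conf').render(
        _data={'engine': 'mysql', 'user': 'nova', 'password': 'secret',
               'host': '10.0.0.20', 'name': 'nova'})
    assert 'connection = mysql+pymysql://nova:secret@10.0.0.20/nova?charset=utf8' in out
    assert 'max_pool_size = 10' in out
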
diff --git a/oslo_templates/files/queens/oslo/_healthcheck.conf b/oslo_templates/files/queens/oslo/_healthcheck.conf
new file mode 100644
index 0000000..2b40f3b
--- /dev/null
+++ b/oslo_templates/files/queens/oslo/_healthcheck.conf
@@ -0,0 +1,32 @@
+
+#
+# From oslo.middleware
+#
+
+# DEPRECATED: The path to respond to healthcheck requests on. (string
+# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#path = /healthcheck
+{%- if _data.path is defined %}
+path = {{ _data.path }}
+{%- endif %}
+
+# Show more detailed information as part of the response (boolean
+# value)
+#detailed = false
+
+# Additional backends that can perform health checks and report that
+# information back as part of a request. (list value)
+#backends =
+
+# Check the presence of a file to determine if an application is
+# running on a port. Used by DisableByFileHealthcheck plugin. (string
+# value)
+#disable_by_file_path = <None>
+
+# Check the presence of a file based on a port to determine if an
+# application is running on a port. Expects a "port:path" list of
+# strings. Used by DisableByFilesPortsHealthcheck plugin. (list value)
+#disable_by_file_paths =
+
diff --git a/oslo_templates/files/queens/oslo/_log.conf b/oslo_templates/files/queens/oslo/_log.conf
new file mode 100644
index 0000000..8c5fc97
--- /dev/null
+++ b/oslo_templates/files/queens/oslo/_log.conf
@@ -0,0 +1,132 @@
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of
+# the default INFO level. (boolean value)
+# Note: This option can be changed without restarting.
+#debug = false
+{%- if _data.debug is defined %}
+debug = {{ _data.debug }}
+{%- endif %}
+
+# The name of a logging configuration file. This file is appended to
+# any existing logging configuration files. For details about logging
+# configuration files, see the Python logging module documentation.
+# Note that when logging configuration files are used then all logging
+# configuration is set in the configuration file and other logging
+# configuration options are ignored (for example,
+# logging_context_format_string). (string value)
+# Note: This option can be changed without restarting.
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set.
+# (string value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default
+# is set, logging will go to stderr as defined by use_stderr. This
+# option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+{%- if _data.log_file is defined %}
+log_file = {{ _data.log_file }}
+{%- endif %}
+
+# (Optional) The base directory used for relative log_file paths.
+# This option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+{%- if _data.log_dir is defined %}
+log_dir = {{ _data.log_dir }}
+{%- endif %}
+
+# Uses logging handler designed to watch file system. When log file is
+# moved or removed this handler will open a new log file with
+# specified path instantaneously. It makes sense only if log_file
+# option is specified and Linux platform is used. This option is
+# ignored if log_config_append is set. (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and
+# will be changed later to honor RFC5424. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_syslog = false
+{%- if _data.use_syslog is defined %}
+use_syslog = {{ _data.use_syslog }}
+{%- endif %}
+
+# Enable journald for logging. If running in a systemd environment you
+# may wish to enable journal support. Doing so will use the journal
+# native protocol which includes structured metadata in addition to
+# log messages.This option is ignored if log_config_append is set.
+# (boolean value)
+#use_journal = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+{%- if _data.syslog_log_facility is defined %}
+syslog_log_facility = {{ _data.syslog_log_facility }}
+{%- endif %}
+
+# Use JSON formatting for logging. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_json = false
+
+# Log output to standard error. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_stderr = false
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined.
+# (string value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the
+# message is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string
+# value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is
+# ignored if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message.
+# (string value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message.
+# (string value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Interval, number of seconds, of log rate limiting. (integer value)
+#rate_limit_interval = 0
+
+# Maximum number of logged messages per rate_limit_interval. (integer
+# value)
+#rate_limit_burst = 0
+
+# Log level name used by rate limiting: CRITICAL, ERROR, INFO,
+# WARNING, DEBUG or empty string. Logs with level greater or equal to
+# rate_limit_except_level are not filtered. An empty string means that
+# all levels are filtered. (string value)
+#rate_limit_except_level = CRITICAL
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
diff --git a/oslo_templates/files/queens/oslo/_middleware.conf b/oslo_templates/files/queens/oslo/_middleware.conf
new file mode 100644
index 0000000..5a9f0e4
--- /dev/null
+++ b/oslo_templates/files/queens/oslo/_middleware.conf
@@ -0,0 +1,25 @@
+
+#
+# From oslo.middleware
+#
+
+# The maximum body size for each request, in bytes. (integer value)
+# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size
+# Deprecated group/name - [DEFAULT]/max_request_body_size
+#max_request_body_size = 114688
+{%- if _data.max_request_body_size is defined %}
+max_request_body_size = {{ _data.max_request_body_size }}
+{%- endif %}
+
+# DEPRECATED: The HTTP Header that will be used to determine what the
+# original request protocol scheme was, even if it was hidden by a SSL
+# termination proxy. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#secure_proxy_ssl_header = X-Forwarded-Proto
+
+# Whether the application is behind a proxy or not. This determines if
+# the middleware should parse the headers or not. (boolean value)
+{%- if _data.enable_proxy_headers_parsing is defined %}
+enable_proxy_headers_parsing = {{ _data.enable_proxy_headers_parsing }}
+{%- endif %}
diff --git a/oslo_templates/files/queens/oslo/_osprofiler.conf b/oslo_templates/files/queens/oslo/_osprofiler.conf
new file mode 100644
index 0000000..03bc340
--- /dev/null
+++ b/oslo_templates/files/queens/oslo/_osprofiler.conf
@@ -0,0 +1,117 @@
+
+#
+# From osprofiler
+#
+
+#
+# Enables profiling for all services on this node. Default value is
+# False (the profiling feature is fully disabled).
+#
+# Possible values:
+#
+# * True: Enables the feature.
+# * False: Disables the feature. Profiling cannot be started via this
+#   project's operations. If profiling is triggered by another project,
+#   this project's part of the trace will be empty.
+# (boolean value)
+# Deprecated group/name - [profiler]/profiler_enabled
+#enabled = false
+{%- if _data.enabled is defined %}
+enabled = {{ _data.enabled }}
+{%- endif %}
+
+#
+# Enables SQL request profiling in services. Default value is False
+# (SQL requests won't be traced).
+#
+# Possible values:
+#
+# * True: Enables SQL request profiling. Each SQL query becomes part of
+#   the trace and can then be analyzed for how much time was spent on it.
+# * False: Disables SQL request profiling. The time spent is only shown
+#   at a higher level of operations; single SQL queries cannot be
+#   analyzed this way.
+# (boolean value)
+#trace_sqlalchemy = false
+
+#
+# Secret key(s) to use for encrypting context data for performance
+# profiling. This string value should have the following format:
+# <key1>[,<key2>,...<keyn>], where each key is some random string.
+# A user who triggers profiling via the REST API has to set one of
+# these keys in the headers of the REST API call to include profiling
+# results of this node for this particular project.
+#
+# Both the "enabled" flag and the "hmac_keys" option should be set to
+# enable profiling. Also, to generate correct profiling information
+# across all services, at least one key needs to be consistent between
+# OpenStack projects. This ensures it can be used from the client side
+# to generate the trace, containing information from all possible
+# resources. (string value)
+#hmac_keys = SECRET_KEY
+
+#
+# Connection string for a notifier backend. Default value is
+# messaging://, which sets the notifier to oslo_messaging.
+#
+# Examples of possible values:
+#
+# * messaging://: use the oslo_messaging driver for sending
+#   notifications.
+# * mongodb://127.0.0.1:27017: use the mongodb driver for sending
+#   notifications.
+# * elasticsearch://127.0.0.1:9200: use the elasticsearch driver for
+#   sending notifications.
+# (string value)
+#connection_string = messaging://
+
+#
+# Document type for notification indexing in elasticsearch.
+# (string value)
+#es_doc_type = notification
+
+#
+# This parameter is a time value (for example: es_scroll_time=2m),
+# indicating for how long the nodes that participate in the search
+# will maintain relevant resources in order to continue and support
+# it. (string value)
+#es_scroll_time = 2m
+
+#
+# Elasticsearch splits large requests into batches. This parameter
+# defines the maximum size of each batch (for example:
+# es_scroll_size=10000). (integer value)
+#es_scroll_size = 10000
+
+#
+# Redis sentinel provides a timeout option on the connections. This
+# parameter defines that timeout (for example: socket_timeout=0.1).
+# (floating point value)
+#socket_timeout = 0.1
+
+#
+# Redis sentinel uses a service name to identify a master redis
+# service. This parameter defines the name (for example:
+# sentinel_service_name=mymaster). (string value)
+#sentinel_service_name = mymaster
diff --git a/oslo_templates/files/queens/oslo/_policy.conf b/oslo_templates/files/queens/oslo/_policy.conf
new file mode 100644
index 0000000..8b535eb
--- /dev/null
+++ b/oslo_templates/files/queens/oslo/_policy.conf
@@ -0,0 +1,51 @@
+
+#
+# From oslo.policy
+#
+
+# This option controls whether or not to enforce scope when evaluating
+# policies. If ``True``, the scope of the token used in the request is
+# compared to the ``scope_types`` of the policy being enforced. If the
+# scopes do not match, an ``InvalidScope`` exception will be raised.
+# If ``False``, a message will be logged informing operators that
+# policies are being invoked with mismatching scope. (boolean value)
+#enforce_scope = false
+
+# The file that defines policies. (string value)
+#policy_file = policy.json
+{%- if _data.policy_file is defined %}
+policy_file = {{ _data.policy_file }}
+{%- endif %}
+
+# Default rule. Enforced when a requested rule is not found. (string
+# value)
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be
+# relative to any directory in the search path defined by the
+# config_dir option, or absolute paths. The file defined by
+# policy_file must exist for these directories to be searched.
+# Missing or empty directories are ignored. (multi valued)
+#policy_dirs = policy.d
+
+# Content Type to send and receive data for REST based policy check
+# (string value)
+# Possible values:
+# application/x-www-form-urlencoded - <No description provided>
+# application/json - <No description provided>
+#remote_content_type = application/x-www-form-urlencoded
+
+# server identity verification for REST based policy check (boolean
+# value)
+#remote_ssl_verify_server_crt = false
+
+# Absolute path to ca cert file for REST based policy check (string
+# value)
+#remote_ssl_ca_crt_file = <None>
+
+# Absolute path to client cert for REST based policy check (string
+# value)
+#remote_ssl_client_crt_file = <None>
+
+# Absolute path client key file REST based policy check (string value)
+#remote_ssl_client_key_file = <None>
diff --git a/oslo_templates/files/queens/oslo/messaging/_amqp.conf b/oslo_templates/files/queens/oslo/messaging/_amqp.conf
new file mode 100644
index 0000000..59b43cc
--- /dev/null
+++ b/oslo_templates/files/queens/oslo/messaging/_amqp.conf
@@ -0,0 +1,214 @@
+
+#
+# From oslo.messaging
+#
+
+# Name for the AMQP container. Must be globally unique. Defaults to a
+# generated UUID (string value)
+#container_name = <None>
+
+# Timeout for inactive connections (in seconds) (integer value)
+#idle_timeout = 0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+#trace = false
+
+# Attempt to connect via SSL. If no other ssl-related parameters are
+# given, it will use the system's CA-bundle to verify the server's
+# certificate. (boolean value)
+#ssl = false
+
+# CA certificate PEM file used to verify the server's certificate
+# (string value)
+#ssl_ca_file =
+
+# Self-identifying certificate PEM file for client authentication
+# (string value)
+#ssl_cert_file =
+
+# Private key PEM file used to sign ssl_cert_file certificate
+# (optional) (string value)
+#ssl_key_file =
+
+# Password for decrypting ssl_key_file (if encrypted) (string value)
+#ssl_key_password = <None>
+
+# By default SSL checks that the name in the server's certificate
+# matches the hostname in the transport_url. In some configurations it
+# may be preferable to use the virtual hostname instead, for example
+# if the server uses the Server Name Indication TLS extension
+# (rfc6066) to provide a certificate per virtual host. Set
+# ssl_verify_vhost to True if the server's SSL certificate uses the
+# virtual host name instead of the DNS name. (boolean value)
+#ssl_verify_vhost = false
+
+# DEPRECATED: Accept clients using either SSL or plain TCP (boolean
+# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Not applicable - not a SSL server
+#allow_insecure_clients = false
+
+# Space separated list of acceptable SASL mechanisms (string value)
+#sasl_mechanisms =
+
+# Path to directory that contains the SASL configuration (string
+# value)
+#sasl_config_dir =
+
+# Name of configuration file (without .conf suffix) (string value)
+#sasl_config_name =
+
+# SASL realm to use if no realm present in username (string value)
+#sasl_default_realm =
+
+# DEPRECATED: User name for message broker authentication (string
+# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Should use configuration option transport_url to provide the
+# username.
+#username =
+
+# DEPRECATED: Password for message broker authentication (string
+# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Should use configuration option transport_url to provide the
+# password.
+#password =
+
+# Seconds to pause before attempting to re-connect. (integer value)
+# Minimum value: 1
+#connection_retry_interval = 1
+
+# Increase the connection_retry_interval by this many seconds after
+# each unsuccessful failover attempt. (integer value)
+# Minimum value: 0
+#connection_retry_backoff = 2
+
+# Maximum limit for connection_retry_interval +
+# connection_retry_backoff (integer value)
+# Minimum value: 1
+#connection_retry_interval_max = 30
+
+# Time to pause between re-connecting an AMQP 1.0 link that failed due
+# to a recoverable error. (integer value)
+# Minimum value: 1
+#link_retry_delay = 10
+
+# The maximum number of attempts to re-send a reply message which
+# failed due to a recoverable error. (integer value)
+# Minimum value: -1
+#default_reply_retry = 0
+
+# The deadline for an rpc reply message delivery. (integer value)
+# Minimum value: 5
+#default_reply_timeout = 30
+
+# The deadline for an rpc cast or call message delivery. Only used
+# when caller does not provide a timeout expiry. (integer value)
+# Minimum value: 5
+#default_send_timeout = 30
+
+# The deadline for a sent notification message delivery. Only used
+# when caller does not provide a timeout expiry. (integer value)
+# Minimum value: 5
+#default_notify_timeout = 30
+
+# The duration to schedule a purge of idle sender links. Detach link
+# after expiry. (integer value)
+# Minimum value: 1
+#default_sender_link_timeout = 600
+
+# Indicates the addressing mode used by the driver.
+# Permitted values:
+# 'legacy' - use legacy non-routable addressing
+# 'routable' - use routable addresses
+# 'dynamic' - use legacy addresses if the message bus does not
+# support routing otherwise use routable addressing (string value)
+#addressing_mode = dynamic
+
+# Enable virtual host support for those message buses that do not
+# natively support virtual hosting (such as qpidd). When set to true
+# the virtual host name will be added to all message bus addresses,
+# effectively creating a private 'subnet' per virtual host. Set to
+# False if the message bus supports virtual hosting using the
+# 'hostname' field in the AMQP 1.0 Open performative as the name of
+# the virtual host. (boolean value)
+#pseudo_vhost = true
+
+# address prefix used when sending to a specific server (string value)
+#server_request_prefix = exclusive
+
+# address prefix used when broadcasting to all servers (string value)
+#broadcast_prefix = broadcast
+
+# address prefix when sending to any server in group (string value)
+#group_request_prefix = unicast
+
+# Address prefix for all generated RPC addresses (string value)
+#rpc_address_prefix = openstack.org/om/rpc
+
+# Address prefix for all generated Notification addresses (string
+# value)
+#notify_address_prefix = openstack.org/om/notify
+
+# Appended to the address prefix when sending a fanout message. Used
+# by the message bus to identify fanout messages. (string value)
+#multicast_address = multicast
+
+# Appended to the address prefix when sending to a particular
+# RPC/Notification server. Used by the message bus to identify
+# messages sent to a single destination. (string value)
+#unicast_address = unicast
+
+# Appended to the address prefix when sending to a group of consumers.
+# Used by the message bus to identify messages that should be
+# delivered in a round-robin fashion across consumers. (string value)
+#anycast_address = anycast
+
+# Exchange name used in notification addresses.
+# Exchange name resolution precedence:
+# Target.exchange if set
+# else default_notification_exchange if set
+# else control_exchange if set
+# else 'notify' (string value)
+#default_notification_exchange = <None>
+{%- if _data.default_notification_exchange is defined %}
+default_notification_exchange = {{ _data.default_notification_exchange }}
+{%- endif %}
+
+# Exchange name used in RPC addresses.
+# Exchange name resolution precedence:
+# Target.exchange if set
+# else default_rpc_exchange if set
+# else control_exchange if set
+# else 'rpc' (string value)
+#default_rpc_exchange = <None>
+
+# Window size for incoming RPC Reply messages. (integer value)
+# Minimum value: 1
+#reply_link_credit = 200
+
+# Window size for incoming RPC Request messages (integer value)
+# Minimum value: 1
+#rpc_server_credit = 100
+
+# Window size for incoming Notification messages (integer value)
+# Minimum value: 1
+#notify_server_credit = 100
+
+# Send messages of this type pre-settled.
+# Pre-settled messages will not receive acknowledgement
+# from the peer. Note well: pre-settled messages may be
+# silently discarded if the delivery fails.
+# Permitted values:
+# 'rpc-call' - send RPC Calls pre-settled
+# 'rpc-reply'- send RPC Replies pre-settled
+# 'rpc-cast' - Send RPC Casts pre-settled
+# 'notify' - Send Notifications pre-settled
+# (multi valued)
+#pre_settled = rpc-cast
+#pre_settled = rpc-reply
+
diff --git a/oslo_templates/files/queens/oslo/messaging/_default.conf b/oslo_templates/files/queens/oslo/messaging/_default.conf
new file mode 100644
index 0000000..d1231ca
--- /dev/null
+++ b/oslo_templates/files/queens/oslo/messaging/_default.conf
@@ -0,0 +1,212 @@
+
+#
+# From oslo.messaging
+#
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size = 30
+
+# The pool size limit for connections expiration policy (integer
+# value)
+#conn_pool_min_size = 2
+
+# The time-to-live in sec of idle connections in the pool (integer
+# value)
+#conn_pool_ttl = 1200
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve to this
+# address. (string value)
+#rpc_zmq_bind_address = *
+
+# MatchMaker driver. (string value)
+# Possible values:
+# redis - <No description provided>
+# sentinel - <No description provided>
+# dummy - <No description provided>
+#rpc_zmq_matchmaker = redis
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts = 1
+
+# Maximum number of ingress messages to locally buffer per topic.
+# Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog = <None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir = /var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP address.
+# Must match "host" option, if running Nova. (string value)
+#rpc_zmq_host = localhost
+
+# Number of seconds to wait before all pending messages will be sent
+# after closing a socket. The default value of -1 specifies an
+# infinite linger period. The value of 0 specifies no linger period.
+# Pending messages shall be discarded immediately when the socket is
+# closed. Positive values specify an upper bound for the linger
+# period. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_cast_timeout
+#zmq_linger = -1
+
+# The default number of seconds that poll should wait. Poll raises
+# timeout exception when timeout expired. (integer value)
+#rpc_poll_timeout = 1
+
+# Expiration timeout in seconds of a name service record about
+# existing target ( < 0 means no timeout). (integer value)
+#zmq_target_expire = 300
+
+# Update period in seconds of a name service record about existing
+# target. (integer value)
+#zmq_target_update = 180
+
+# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy.
+# (boolean value)
+#use_pub_sub = false
+
+# Use ROUTER remote proxy. (boolean value)
+#use_router_proxy = false
+
+# This option makes direct connections dynamic or static. It makes
+# sense only with use_router_proxy=False which means to use direct
+# connections for direct message types (ignored otherwise). (boolean
+# value)
+#use_dynamic_connections = false
+
+# How many additional connections to a host will be made for failover
+# reasons. This option is actual only in dynamic connections mode.
+# (integer value)
+#zmq_failover_connections = 2
+
+# Minimal port number for random ports range. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#rpc_zmq_min_port = 49153
+
+# Maximal port number for random ports range. (integer value)
+# Minimum value: 1
+# Maximum value: 65536
+#rpc_zmq_max_port = 65536
+
+# Number of retries to find free port number before fail with
+# ZMQBindError. (integer value)
+#rpc_zmq_bind_port_retries = 100
+
+# Default serialization mechanism for serializing/deserializing
+# outgoing/incoming messages (string value)
+# Possible values:
+# json - <No description provided>
+# msgpack - <No description provided>
+#rpc_zmq_serialization = json
+
+# This option configures round-robin mode in zmq socket. True means
+# not keeping a queue when server side disconnects. False means to
+# keep queue and messages even if server is disconnected, when the
+# server appears we send all accumulated messages to it. (boolean
+# value)
+#zmq_immediate = true
+
+# Enable/disable TCP keepalive (KA) mechanism. The default value of -1
+# (or any other negative value) means to skip any overrides and leave
+# it to OS default; 0 and 1 (or any other positive value) mean to
+# disable and enable the option respectively. (integer value)
+#zmq_tcp_keepalive = -1
+
+# The duration between two keepalive transmissions in idle condition.
+# The unit is platform dependent, for example, seconds in Linux,
+# milliseconds in Windows etc. The default value of -1 (or any other
+# negative value and 0) means to skip any overrides and leave it to OS
+# default. (integer value)
+#zmq_tcp_keepalive_idle = -1
+
+# The number of retransmissions to be carried out before declaring
+# that remote end is not available. The default value of -1 (or any
+# other negative value and 0) means to skip any overrides and leave it
+# to OS default. (integer value)
+#zmq_tcp_keepalive_cnt = -1
+
+# The duration between two successive keepalive retransmissions, if
+# acknowledgement to the previous keepalive transmission is not
+# received. The unit is platform dependent, for example, seconds in
+# Linux, milliseconds in Windows etc. The default value of -1 (or any
+# other negative value and 0) means to skip any overrides and leave it
+# to OS default. (integer value)
+#zmq_tcp_keepalive_intvl = -1
+
+# Maximum number of (green) threads to work concurrently. (integer
+# value)
+#rpc_thread_pool_size = 100
+
+# Expiration timeout in seconds of a sent/received message after which
+# it is not tracked anymore by a client/server. (integer value)
+#rpc_message_ttl = 300
+
+# Wait for message acknowledgements from receivers. This mechanism
+# works only via proxy without PUB/SUB. (boolean value)
+#rpc_use_acks = false
+
+# Number of seconds to wait for an ack from a cast/call. After each
+# retry attempt this timeout is multiplied by some specified
+# multiplier. (integer value)
+#rpc_ack_timeout_base = 15
+
+# Number to multiply base ack timeout by after each retry attempt.
+# (integer value)
+#rpc_ack_timeout_multiplier = 2
+
+# Default number of message sending attempts in case of any problems
+# occurred: positive value N means at most N retries, 0 means no
+# retries, None or -1 (or any other negative values) mean to retry
+# forever. This option is used only if acknowledgments are enabled.
+# (integer value)
+#rpc_retry_attempts = 3
+
+# List of publisher hosts SubConsumer can subscribe on. This option
+# has higher priority then the default publishers list taken from the
+# matchmaker. (list value)
+#subscribe_on =
+
+# Size of executor thread pool when executor is threading or eventlet.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
+#executor_thread_pool_size = 64
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout = 60
+
+# The network address and optional user credentials for connecting to
+# the messaging backend, in URL format. The expected format is:
+#
+# driver://[user:pass@]host:port[,[userN:passN@]hostN:portN]/virtual_host?query
+#
+# Example: rabbit://rabbitmq:password@127.0.0.1:5672//
+#
+# For full details on the fields in the URL see the documentation of
+# oslo_messaging.TransportURL at
+# https://docs.openstack.org/oslo.messaging/latest/reference/transport.html
+# (string value)
+#transport_url = <None>
+{%- set rabbit_port = _data.get('port', 5671 if _data.get('ssl',{}).get('enabled', False) else 5672) %}
+{%- if _data.members is defined %}
+transport_url = rabbit://{% for member in _data.members -%}
+ {{ _data.user }}:{{ _data.password }}@{{ member.host }}:{{ member.get('port', rabbit_port) }}
+ {%- if not loop.last -%},{%- endif -%}
+ {%- endfor -%}
+ /{{ _data.virtual_host }}
+{%- else %}
+transport_url = rabbit://{{ _data.user }}:{{ _data.password }}@{{ _data.host }}:{{ rabbit_port }}/{{ _data.virtual_host }}
+{%- endif %}
+
+# DEPRECATED: The messaging driver to use, defaults to rabbit. Other
+# drivers include amqp and zmq. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rpc_backend = rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the transport_url
+# option. (string value)
+#control_exchange = openstack
+
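
The transport_url block above either expands a `members` list into a comma-separated broker list or falls back to a single host, with the port defaulting to 5671 when SSL is enabled and 5672 otherwise. A rough illustration of the multi-member case with plain Jinja2 and made-up data:

    # Illustrative only.
    import jinja2

    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader('oslo_templates/files/queens'))
    out = env.get_template('oslo/messaging/_default.conf').render(
        _data={'user': 'openstack', 'password': 'secret', 'virtual_host': 'openstack',
               'members': [{'host': '10.0.0.31'}, {'host': '10.0.0.32'}]})
    assert ('transport_url = rabbit://openstack:secret@10.0.0.31:5672,'
            'openstack:secret@10.0.0.32:5672/openstack') in out
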
diff --git a/oslo_templates/files/queens/oslo/messaging/_kafka.conf b/oslo_templates/files/queens/oslo/messaging/_kafka.conf
new file mode 100644
index 0000000..a0fb3e6
--- /dev/null
+++ b/oslo_templates/files/queens/oslo/messaging/_kafka.conf
@@ -0,0 +1,56 @@
+
+#
+# From oslo.messaging
+#
+
+# DEPRECATED: Default Kafka broker Host (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#kafka_default_host = localhost
+
+# DEPRECATED: Default Kafka broker Port (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#kafka_default_port = 9092
+
+# Max fetch bytes of Kafka consumer (integer value)
+#kafka_max_fetch_bytes = 1048576
+
+# Default timeout(s) for Kafka consumers (floating point value)
+#kafka_consumer_timeout = 1.0
+
+# DEPRECATED: Pool Size for Kafka Consumers (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Driver no longer uses connection pool.
+#pool_size = 10
+
+# DEPRECATED: The pool size limit for connections expiration policy
+# (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Driver no longer uses connection pool.
+#conn_pool_min_size = 2
+
+# DEPRECATED: The time-to-live in sec of idle connections in the pool
+# (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Driver no longer uses connection pool.
+#conn_pool_ttl = 1200
+
+# Group id for Kafka consumer. Consumers in one group will coordinate
+# message consumption (string value)
+#consumer_group = oslo_messaging_consumer
+
+# Upper bound on the delay for KafkaProducer batching in seconds
+# (floating point value)
+#producer_batch_timeout = 0.0
+
+# Size of batch for the producer async send (integer value)
+#producer_batch_size = 16384
+
diff --git a/oslo_templates/files/queens/oslo/messaging/_notifications.conf b/oslo_templates/files/queens/oslo/messaging/_notifications.conf
new file mode 100644
index 0000000..67c6222
--- /dev/null
+++ b/oslo_templates/files/queens/oslo/messaging/_notifications.conf
@@ -0,0 +1,35 @@
+
+#
+# From oslo.messaging
+#
+
+# The Drivers(s) to handle sending notifications. Possible values are
+# messaging, messagingv2, routing, log, test, noop (multi valued)
+# Deprecated group/name - [DEFAULT]/notification_driver
+#driver =
+
+# A URL representing the messaging driver to use for notifications. If
+# not set, we fall back to the same configuration used for RPC.
+# (string value)
+# Deprecated group/name - [DEFAULT]/notification_transport_url
+#transport_url = <None>
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+# Deprecated group/name - [DEFAULT]/notification_topics
+#topics = notifications
+
+# The maximum number of attempts to re-send a notification message
+# which failed to be delivered due to a recoverable error. 0 - No
+# retry, -1 - indefinite (integer value)
+#retry = -1
+
+{%- if _data is mapping %}
+driver = {{ _data.get('driver', 'messagingv2') }}
+{%- if _data.topics is defined %}
+topics = {{ _data.topics }}
+{%- endif %}
+{%- elif _data %}
+driver = messagingv2
+{%- endif %}
+
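The notifications block above accepts either a mapping with driver/topics keys or any other truthy value, which simply enables the default messagingv2 driver. A small standalone sketch of that behaviour (jinja2 package assumed, sample pillar data made up):

    # Standalone sketch of the mapping-vs-flag handling in the block above.
    from jinja2 import Template

    TPL = Template(
        "{%- if _data is mapping %}\n"
        "driver = {{ _data.get('driver', 'messagingv2') }}\n"
        "{%- if _data.topics is defined %}\n"
        "topics = {{ _data.topics }}\n"
        "{%- endif %}\n"
        "{%- elif _data %}\n"
        "driver = messagingv2\n"
        "{%- endif %}"
    )

    print(TPL.render(_data={"driver": "messagingv2", "topics": "notifications"}).strip())
    # driver = messagingv2
    # topics = notifications
    print(TPL.render(_data=True).strip())
    # driver = messagingv2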
diff --git a/oslo_templates/files/queens/oslo/messaging/_rabbit.conf b/oslo_templates/files/queens/oslo/messaging/_rabbit.conf
new file mode 100644
index 0000000..c20e233
--- /dev/null
+++ b/oslo_templates/files/queens/oslo/messaging/_rabbit.conf
@@ -0,0 +1,288 @@
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_durable_queues
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues = false
+
+# Auto-delete queues in AMQP. (boolean value)
+#amqp_auto_delete = false
+
+# Enable SSL (boolean value)
+#ssl = <None>
+{%- if _data.get('ssl',{}).get('enabled', False) %}
+ssl = true
+
+
+# SSL version to use (valid only if SSL enabled). Valid values are
+# TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be
+# available on some distributions. (string value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_version
+#ssl_version =
+{%- if _data.ssl.version is defined %}
+ssl_version = {{ _data.ssl.version }}
+{%- elif salt['grains.get']('pythonversion') > [2,7,8] %}
+ssl_version = TLSv1_2
+{%- endif %}
+
+
+# SSL key file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_keyfile
+#ssl_key_file =
+
+# SSL cert file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_certfile
+#ssl_cert_file =
+
+# SSL certification authority file (valid only if SSL enabled).
+# (string value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_ca_certs
+#ssl_ca_file =
+ssl_ca_file = {{ _data.ssl.cacert_file }}
+{%- endif %}
+
+
+# How long to wait before reconnecting in response to an AMQP consumer
+# cancel notification. (floating point value)
+#kombu_reconnect_delay = 1.0
+
+# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression
+# will not be used. This option may not be available in future
+# versions. (string value)
+#kombu_compression = <None>
+
+# How long to wait for a missing client before giving up on sending it
+# its replies. This value should not be longer than rpc_response_timeout.
+# (integer value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout
+#kombu_missing_consumer_retry_timeout = 60
+
+# Determines how the next RabbitMQ node is chosen in case the one we
+# are currently connected to becomes unavailable. Takes effect only if
+# more than one RabbitMQ node is provided in config. (string value)
+# Possible values:
+# round-robin - <No description provided>
+# shuffle - <No description provided>
+#kombu_failover_strategy = round-robin
+
+# DEPRECATED: The RabbitMQ broker address where a single node is used.
+# (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rabbit_host = localhost
+
+# DEPRECATED: The RabbitMQ broker port where a single node is used.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rabbit_port = 5672
+
+# DEPRECATED: RabbitMQ HA cluster host:port pairs. (list value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rabbit_hosts = $rabbit_host:$rabbit_port
+
+# DEPRECATED: The RabbitMQ userid. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rabbit_userid = guest
+
+# DEPRECATED: The RabbitMQ password. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rabbit_password = guest
+
+# The RabbitMQ login method. (string value)
+# Possible values:
+# PLAIN - <No description provided>
+# AMQPLAIN - <No description provided>
+# RABBIT-CR-DEMO - <No description provided>
+#rabbit_login_method = AMQPLAIN
+
+# DEPRECATED: The RabbitMQ virtual host. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rabbit_virtual_host = /
+
+# How frequently to retry connecting with RabbitMQ. (integer value)
+#rabbit_retry_interval = 1
+
+# How long to backoff for between retries when connecting to RabbitMQ.
+# (integer value)
+#rabbit_retry_backoff = 2
+
+# Maximum interval of RabbitMQ connection retries. Default is 30
+# seconds. (integer value)
+#rabbit_interval_max = 30
+
+# DEPRECATED: Maximum number of RabbitMQ connection retries. Default
+# is 0 (infinite retry count). (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#rabbit_max_retries = 0
+
+# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. In RabbitMQ 3.0,
+# queue mirroring is no longer controlled by the x-ha-policy argument
+# when declaring a queue. If you just want to make sure that all
+# queues (except those with auto-generated names) are mirrored across
+# all nodes, run: "rabbitmqctl set_policy HA '^(?!amq\.).*' '{"ha-
+# mode": "all"}' " (boolean value)
+#rabbit_ha_queues = false
+
+# Positive integer representing duration in seconds for queue TTL
+# (x-expires). Queues which are unused for the duration of the TTL are
+# automatically deleted. The parameter affects only reply and fanout
+# queues. (integer value)
+# Minimum value: 1
+#rabbit_transient_queues_ttl = 1800
+
+# Specifies the number of messages to prefetch. Setting to zero allows
+# unlimited messages. (integer value)
+#rabbit_qos_prefetch_count = 64
+
+# Number of seconds after which the Rabbit broker is considered down
+# if heartbeat's keep-alive fails (0 disables the heartbeat).
+# EXPERIMENTAL (integer value)
+#heartbeat_timeout_threshold = 60
+{%- if _data.get('heartbeat_timeout_threshold', 0) %}
+heartbeat_timeout_threshold = {{ _data.get('heartbeat_timeout_threshold', 0) }}
+{%- endif %}
+
+# How many times during the heartbeat_timeout_threshold we check the
+# heartbeat. (integer value)
+#heartbeat_rate = 2
+{%- if _data.heartbeat_rate is defined %}
+heartbeat_rate = {{ _data.heartbeat_rate }}
+{%- endif %}
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
+# (boolean value)
+#fake_rabbit = false
+
+# Maximum number of channels to allow (integer value)
+#channel_max = <None>
+
+# The maximum byte size for an AMQP frame (integer value)
+#frame_max = <None>
+
+# How often to send heartbeats for consumer's connections (integer
+# value)
+#heartbeat_interval = 3
+
+# Arguments passed to ssl.wrap_socket (dict value)
+#ssl_options = <None>
+
+# Set socket timeout in seconds for connection's socket (floating
+# point value)
+#socket_timeout = 0.25
+
+# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating
+# point value)
+#tcp_user_timeout = 0.25
+
+# Set delay for reconnection to some host which has connection error
+# (floating point value)
+#host_connection_reconnect_delay = 0.25
+
+# Connection factory implementation (string value)
+# Possible values:
+# new - <No description provided>
+# single - <No description provided>
+# read_write - <No description provided>
+#connection_factory = single
+
+# Maximum number of connections to keep queued. (integer value)
+#pool_max_size = 30
+
+# Maximum number of connections to create above `pool_max_size`.
+# (integer value)
+#pool_max_overflow = 0
+
+# Default number of seconds to wait for a connection to become available
+# (integer value)
+#pool_timeout = 30
+
+# Lifetime of a connection (since creation) in seconds or None for no
+# recycling. Expired connections are closed on acquire. (integer
+# value)
+#pool_recycle = 600
+
+# Threshold at which inactive (since release) connections are
+# considered stale in seconds or None for no staleness. Stale
+# connections are closed on acquire. (integer value)
+#pool_stale = 60
+
+# Default serialization mechanism for serializing/deserializing
+# outgoing/incoming messages (string value)
+# Possible values:
+# json - <No description provided>
+# msgpack - <No description provided>
+#default_serializer_type = json
+
+# Persist notification messages. (boolean value)
+#notification_persistence = false
+
+# Exchange name for sending notifications (string value)
+#default_notification_exchange = ${control_exchange}_notification
+
+# Max number of not acknowledged message which RabbitMQ can send to
+# notification listener. (integer value)
+#notification_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during
+# sending notification, -1 means infinite retry. (integer value)
+#default_notification_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during
+# sending notification message (floating point value)
+#notification_retry_delay = 0.25
+
+# Time to live for rpc queues without consumers in seconds. (integer
+# value)
+#rpc_queue_expiration = 60
+
+# Exchange name for sending RPC messages (string value)
+#default_rpc_exchange = ${control_exchange}_rpc
+
+# Exchange name for receiving RPC replies (string value)
+#rpc_reply_exchange = ${control_exchange}_rpc_reply
+
+# Max number of not acknowledged message which RabbitMQ can send to
+# rpc listener. (integer value)
+#rpc_listener_prefetch_count = 100
+
+# Max number of not acknowledged message which RabbitMQ can send to
+# rpc reply listener. (integer value)
+#rpc_reply_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during
+# sending reply. -1 means infinite retry during rpc_timeout (integer
+# value)
+#rpc_reply_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during
+# sending reply. (floating point value)
+#rpc_reply_retry_delay = 0.25
+
+# Reconnecting retry count in case of connectivity problem during
+# sending RPC message, -1 means infinite retry. If actual retry
+# attempts in not 0 the rpc request could be processed more than one
+# time (integer value)
+#default_rpc_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during
+# sending RPC message (floating point value)
+#rpc_retry_delay = 0.25
+
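The only conditional parts of the rabbit template are the SSL block (ssl_version falls back to TLSv1_2 when the pythonversion grain is newer than 2.7.8) and the heartbeat overrides. A standalone sketch of the SSL branch with a mocked salt object follows; jinja2 is assumed and the values are made up.

    # Mocked-out sketch of the ssl/ssl_version selection above; under Salt the
    # pythonversion grain is a list of version components, so a plain list
    # comparison decides whether TLSv1_2 can be forced.
    from jinja2 import Template

    TPL = Template(
        "{%- if _data.get('ssl', {}).get('enabled', False) %}\n"
        "ssl = true\n"
        "{%- if _data.ssl.version is defined %}\n"
        "ssl_version = {{ _data.ssl.version }}\n"
        "{%- elif salt['grains.get']('pythonversion') > [2, 7, 8] %}\n"
        "ssl_version = TLSv1_2\n"
        "{%- endif %}\n"
        "ssl_ca_file = {{ _data.ssl.cacert_file }}\n"
        "{%- endif %}"
    )

    fake_salt = {"grains.get": lambda name: [2, 7, 12]}   # stand-in for the real grain lookup
    pillar = {"ssl": {"enabled": True, "cacert_file": "/path/to/ca"}}

    print(TPL.render(_data=pillar, salt=fake_salt).strip())
    # ssl = true
    # ssl_version = TLSv1_2
    # ssl_ca_file = /path/to/ca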
diff --git a/oslo_templates/files/queens/oslo/messaging/_zmq.conf b/oslo_templates/files/queens/oslo/messaging/_zmq.conf
new file mode 100644
index 0000000..17e3fa5
--- /dev/null
+++ b/oslo_templates/files/queens/oslo/messaging/_zmq.conf
@@ -0,0 +1,157 @@
+
+#
+# From oslo.messaging
+#
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve to this
+# address. (string value)
+#rpc_zmq_bind_address = *
+
+# MatchMaker driver. (string value)
+# Possible values:
+# redis - <No description provided>
+# sentinel - <No description provided>
+# dummy - <No description provided>
+#rpc_zmq_matchmaker = redis
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts = 1
+
+# Maximum number of ingress messages to locally buffer per topic.
+# Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog = <None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir = /var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP address.
+# Must match "host" option, if running Nova. (string value)
+#rpc_zmq_host = localhost
+
+# Number of seconds to wait before all pending messages will be sent
+# after closing a socket. The default value of -1 specifies an
+# infinite linger period. The value of 0 specifies no linger period.
+# Pending messages shall be discarded immediately when the socket is
+# closed. Positive values specify an upper bound for the linger
+# period. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_cast_timeout
+#zmq_linger = -1
+
+# The default number of seconds that poll should wait. Poll raises
+# timeout exception when timeout expired. (integer value)
+#rpc_poll_timeout = 1
+
+# Expiration timeout in seconds of a name service record about
+# existing target ( < 0 means no timeout). (integer value)
+#zmq_target_expire = 300
+
+# Update period in seconds of a name service record about existing
+# target. (integer value)
+#zmq_target_update = 180
+
+# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy.
+# (boolean value)
+#use_pub_sub = false
+
+# Use ROUTER remote proxy. (boolean value)
+#use_router_proxy = false
+
+# This option makes direct connections dynamic or static. It makes
+# sense only with use_router_proxy=False which means to use direct
+# connections for direct message types (ignored otherwise). (boolean
+# value)
+#use_dynamic_connections = false
+
+# How many additional connections to a host will be made for failover
+# reasons. This option only takes effect in dynamic connections mode.
+# (integer value)
+#zmq_failover_connections = 2
+
+# Minimal port number for random ports range. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#rpc_zmq_min_port = 49153
+
+# Maximal port number for random ports range. (integer value)
+# Minimum value: 1
+# Maximum value: 65536
+#rpc_zmq_max_port = 65536
+
+# Number of retries to find free port number before fail with
+# ZMQBindError. (integer value)
+#rpc_zmq_bind_port_retries = 100
+
+# Default serialization mechanism for serializing/deserializing
+# outgoing/incoming messages (string value)
+# Possible values:
+# json - <No description provided>
+# msgpack - <No description provided>
+#rpc_zmq_serialization = json
+
+# This option configures round-robin mode in zmq socket. True means
+# not keeping a queue when server side disconnects. False means to
+# keep queue and messages even if server is disconnected, when the
+# server appears we send all accumulated messages to it. (boolean
+# value)
+#zmq_immediate = true
+
+# Enable/disable TCP keepalive (KA) mechanism. The default value of -1
+# (or any other negative value) means to skip any overrides and leave
+# it to OS default; 0 and 1 (or any other positive value) mean to
+# disable and enable the option respectively. (integer value)
+#zmq_tcp_keepalive = -1
+
+# The duration between two keepalive transmissions in idle condition.
+# The unit is platform dependent, for example, seconds in Linux,
+# milliseconds in Windows etc. The default value of -1 (or any other
+# negative value and 0) means to skip any overrides and leave it to OS
+# default. (integer value)
+#zmq_tcp_keepalive_idle = -1
+
+# The number of retransmissions to be carried out before declaring
+# that remote end is not available. The default value of -1 (or any
+# other negative value and 0) means to skip any overrides and leave it
+# to OS default. (integer value)
+#zmq_tcp_keepalive_cnt = -1
+
+# The duration between two successive keepalive retransmissions, if
+# acknowledgement to the previous keepalive transmission is not
+# received. The unit is platform dependent, for example, seconds in
+# Linux, milliseconds in Windows etc. The default value of -1 (or any
+# other negative value and 0) means to skip any overrides and leave it
+# to OS default. (integer value)
+#zmq_tcp_keepalive_intvl = -1
+
+# Maximum number of (green) threads to work concurrently. (integer
+# value)
+#rpc_thread_pool_size = 100
+
+# Expiration timeout in seconds of a sent/received message after which
+# it is not tracked anymore by a client/server. (integer value)
+#rpc_message_ttl = 300
+
+# Wait for message acknowledgements from receivers. This mechanism
+# works only via proxy without PUB/SUB. (boolean value)
+#rpc_use_acks = false
+
+# Number of seconds to wait for an ack from a cast/call. After each
+# retry attempt this timeout is multiplied by some specified
+# multiplier. (integer value)
+#rpc_ack_timeout_base = 15
+
+# Number to multiply base ack timeout by after each retry attempt.
+# (integer value)
+#rpc_ack_timeout_multiplier = 2
+
+# Default number of message sending attempts in case any problems
+# occur: positive value N means at most N retries, 0 means no
+# retries, None or -1 (or any other negative values) mean to retry
+# forever. This option is used only if acknowledgments are enabled.
+# (integer value)
+#rpc_retry_attempts = 3
+
+# List of publisher hosts SubConsumer can subscribe on. This option
+# has higher priority than the default publishers list taken from the
+# matchmaker. (list value)
+#subscribe_on =
diff --git a/oslo_templates/init.sls b/oslo_templates/init.sls
deleted file mode 100644
index 98318cb..0000000
--- a/oslo_templates/init.sls
+++ /dev/null
@@ -1,6 +0,0 @@
-{%- if pillar.oslo_templates is defined %}
-include:
-{%- if pillar.oslo_templates.template is defined %}
-- oslo_templates.template
-{%- endif %}
-{%- endif %}
diff --git a/oslo_templates/map.jinja b/oslo_templates/map.jinja
deleted file mode 100644
index fa9ad0b..0000000
--- a/oslo_templates/map.jinja
+++ /dev/null
@@ -1,10 +0,0 @@
-
-{%- load_yaml as base_defaults %}
-
-Debian:
- pkgs:
- - oslo_templates
-
-{%- endload %}
-
-{%- set template = salt['grains.filter_by'](base_defaults, merge=salt['pillar.get']('oslo_templates:template')) %}
diff --git a/oslo_templates/schemas/template.yaml b/oslo_templates/schemas/template.yaml
deleted file mode 100644
index 1e12b12..0000000
--- a/oslo_templates/schemas/template.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-%YAML 1.1
----
-"$schema": "http://json-schema.org/draft-06/schema#"
-title: oslo_templates daemon role
-description: |
- Oslo_templates daemon, server role
-type: object
-additionalProperties: false
-
-required:
- - enabled
-
-properties:
- enabled:
- description: |
- Enables oslo_templates daemon service
- type: boolean
diff --git a/tests/pillar/queens/keystoneauth/_type_password.sls b/tests/pillar/queens/keystoneauth/_type_password.sls
new file mode 100644
index 0000000..d7bad0a
--- /dev/null
+++ b/tests/pillar/queens/keystoneauth/_type_password.sls
@@ -0,0 +1,25 @@
+_data:
+ host: localhost
+ port: 5000
+ default_domain_id: id
+ default_domain_name: name
+ domain_id: id
+ domain_name: name
+ password: password
+ project_domain_name: name
+ project_id: id
+ project_name: name
+ tenant_id: id
+ tenant_name: name
+ user_domain_name: name
+ user_id: id
+ user: user
+ version: v1
+ cache:
+ servers:
+ srv1:
+ port: 12345
+ enabled: true
+ srv2:
+ port: 12345
+ enabled: true
diff --git a/tests/pillar/queens/oslo/_cache.sls b/tests/pillar/queens/oslo/_cache.sls
new file mode 100644
index 0000000..3f2a44d
--- /dev/null
+++ b/tests/pillar/queens/oslo/_cache.sls
@@ -0,0 +1,8 @@
+_data:
+ servers:
+ srv1:
+ port: 12345
+ enabled: true
+ srv2:
+ port: 12345
+ enabled: true
diff --git a/tests/pillar/queens/oslo/_concurrency.sls b/tests/pillar/queens/oslo/_concurrency.sls
new file mode 100644
index 0000000..fe67aa2
--- /dev/null
+++ b/tests/pillar/queens/oslo/_concurrency.sls
@@ -0,0 +1,2 @@
+_data:
+ disable_process_locking: true
diff --git a/tests/pillar/queens/oslo/_cors.sls b/tests/pillar/queens/oslo/_cors.sls
new file mode 100644
index 0000000..71a3e26
--- /dev/null
+++ b/tests/pillar/queens/oslo/_cors.sls
@@ -0,0 +1,9 @@
+_data:
+ allowed_origin: origin
+ allow_credentials: abcde
+ expose_headers:
+ - h1
+ - h2
+ max_age: 12
+ allow_methods: all_methods
+ allow_headers: allow_headers
diff --git a/tests/pillar/queens/oslo/_database-ssl.sls b/tests/pillar/queens/oslo/_database-ssl.sls
new file mode 100644
index 0000000..2677990
--- /dev/null
+++ b/tests/pillar/queens/oslo/_database-ssl.sls
@@ -0,0 +1,9 @@
+_data:
+ engine: mysql
+ user: foo
+ password: bar
+ host: localhost
+ name: mydatabase
+ ssl:
+ enabled: true
+ cacert_file: //path/to/cacert.file
diff --git a/tests/pillar/queens/oslo/_database.sls b/tests/pillar/queens/oslo/_database.sls
new file mode 100644
index 0000000..865485e
--- /dev/null
+++ b/tests/pillar/queens/oslo/_database.sls
@@ -0,0 +1,6 @@
+_data:
+ engine: mysql
+ user: foo
+ password: bar
+ host: localhost
+ name: mydatabase
diff --git a/tests/pillar/queens/oslo/_healthcheck.sls b/tests/pillar/queens/oslo/_healthcheck.sls
new file mode 100644
index 0000000..10122af
--- /dev/null
+++ b/tests/pillar/queens/oslo/_healthcheck.sls
@@ -0,0 +1,2 @@
+_data:
+ path: /path/to
diff --git a/tests/pillar/queens/oslo/_log.sls b/tests/pillar/queens/oslo/_log.sls
new file mode 100644
index 0000000..ae30847
--- /dev/null
+++ b/tests/pillar/queens/oslo/_log.sls
@@ -0,0 +1,5 @@
+_data:
+ debug: false
+ log_file: /path/to/file
+ use_syslog: true
+ syslog_log_facility: INFO
diff --git a/tests/pillar/queens/oslo/_middleware.sls b/tests/pillar/queens/oslo/_middleware.sls
new file mode 100644
index 0000000..ed08e6d
--- /dev/null
+++ b/tests/pillar/queens/oslo/_middleware.sls
@@ -0,0 +1,3 @@
+_data:
+ max_request_body_size: 123
+ enable_proxy_headers_parsing: true
diff --git a/tests/pillar/queens/oslo/_osprofiler.sls b/tests/pillar/queens/oslo/_osprofiler.sls
new file mode 100644
index 0000000..4d19604
--- /dev/null
+++ b/tests/pillar/queens/oslo/_osprofiler.sls
@@ -0,0 +1,2 @@
+_data:
+ enabled: true
diff --git a/tests/pillar/queens/oslo/_policy.sls b/tests/pillar/queens/oslo/_policy.sls
new file mode 100644
index 0000000..693dc81
--- /dev/null
+++ b/tests/pillar/queens/oslo/_policy.sls
@@ -0,0 +1,2 @@
+_data:
+ policy_file: policy.json
diff --git a/tests/pillar/queens/oslo/messaging/_amqp.sls b/tests/pillar/queens/oslo/messaging/_amqp.sls
new file mode 100644
index 0000000..67d1dad
--- /dev/null
+++ b/tests/pillar/queens/oslo/messaging/_amqp.sls
@@ -0,0 +1,2 @@
+_data:
+ default_notification_exchange: abcde
diff --git a/tests/pillar/queens/oslo/messaging/_default.sls b/tests/pillar/queens/oslo/messaging/_default.sls
new file mode 100644
index 0000000..78db5b3
--- /dev/null
+++ b/tests/pillar/queens/oslo/messaging/_default.sls
@@ -0,0 +1,8 @@
+_data:
+ user: user
+ password: password
+ virtual_host: /
+ members:
+ - host: srv1
+ port: 5671
+ - host: srv2
diff --git a/tests/pillar/queens/oslo/messaging/_notifications.sls b/tests/pillar/queens/oslo/messaging/_notifications.sls
new file mode 100644
index 0000000..8955ccf
--- /dev/null
+++ b/tests/pillar/queens/oslo/messaging/_notifications.sls
@@ -0,0 +1,3 @@
+_data:
+ driver: messagingv2
+ topics: topics
diff --git a/tests/pillar/queens/oslo/messaging/_rabbit-ssl.sls b/tests/pillar/queens/oslo/messaging/_rabbit-ssl.sls
new file mode 100644
index 0000000..0695e42
--- /dev/null
+++ b/tests/pillar/queens/oslo/messaging/_rabbit-ssl.sls
@@ -0,0 +1,4 @@
+_data:
+ ssl:
+ enabled: true
+ cacert_file: /path/to/ca
diff --git a/tests/pillar/queens/oslo/messaging/_rabbit.sls b/tests/pillar/queens/oslo/messaging/_rabbit.sls
new file mode 100644
index 0000000..02d586e
--- /dev/null
+++ b/tests/pillar/queens/oslo/messaging/_rabbit.sls
@@ -0,0 +1,3 @@
+_data:
+ heartbeat_timeout_threshold: 1
+ heartbeat_rate: 3
diff --git a/tests/pillar/template_single.sls b/tests/pillar/template_single.sls
deleted file mode 100644
index 2c1deb3..0000000
--- a/tests/pillar/template_single.sls
+++ /dev/null
@@ -1,3 +0,0 @@
-oslo_templates:
- template:
- enabled: true
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
index a348912..96cb9ad 100755
--- a/tests/run_tests.sh
+++ b/tests/run_tests.sh
@@ -1,35 +1,25 @@
#!/usr/bin/env bash
-###
-# Script requirments:
-#apt-get install -y python-yaml virtualenv git
-
set -e
[ -n "$DEBUG" ] && set -x
CURDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
METADATA=${CURDIR}/../metadata.yml
FORMULA_NAME=$(cat $METADATA | python -c "import sys,yaml; print yaml.load(sys.stdin)['name']")
-FORMULA_META_DIR=${CURDIR}/../${FORMULA_NAME}/meta
## Overrideable parameters
PILLARDIR=${PILLARDIR:-${CURDIR}/pillar}
BUILDDIR=${BUILDDIR:-${CURDIR}/build}
VENV_DIR=${VENV_DIR:-${BUILDDIR}/virtualenv}
-MOCK_BIN_DIR=${MOCK_BIN_DIR:-${CURDIR}/mock_bin}
DEPSDIR=${BUILDDIR}/deps
-SCHEMARDIR=${SCHEMARDIR:-"${CURDIR}/../${FORMULA_NAME}/schemas/"}
SALT_FILE_DIR=${SALT_FILE_DIR:-${BUILDDIR}/file_root}
SALT_PILLAR_DIR=${SALT_PILLAR_DIR:-${BUILDDIR}/pillar_root}
SALT_CONFIG_DIR=${SALT_CONFIG_DIR:-${BUILDDIR}/salt}
SALT_CACHE_DIR=${SALT_CACHE_DIR:-${SALT_CONFIG_DIR}/cache}
-SALT_CACHE_EXTMODS_DIR=${SALT_CACHE_EXTMODS_DIR:-${SALT_CONFIG_DIR}/cache_master_extmods}
SALT_OPTS="${SALT_OPTS} --retcode-passthrough --local -c ${SALT_CONFIG_DIR} --log-file=/dev/null"
-IGNORE_MODELVALIDATE_MASK=${IGNORE_MODELVALIDATE_MASK:-"novalidate"}
-
if [ "x${SALT_VERSION}" != "x" ]; then
PIP_SALT_VERSION="==${SALT_VERSION}"
fi
@@ -45,31 +35,48 @@
setup_virtualenv() {
log_info "Setting up Python virtualenv"
- dependency_check virtualenv
virtualenv $VENV_DIR
source ${VENV_DIR}/bin/activate
python -m pip install salt${PIP_SALT_VERSION}
- if [[ -f ${CURDIR}/test-requirements.txt ]]; then
- python -m pip install -r ${CURDIR}/test-requirements.txt
- fi
}
-setup_mock_bin() {
- # If some state requires a binary, a lightweight replacement for
- # such binary can be put into MOCK_BIN_DIR for test purposes
- if [ -d "${MOCK_BIN_DIR}" ]; then
- PATH="${MOCK_BIN_DIR}:$PATH"
- export PATH
- fi
+setup_test_state() {
+ local template_path=$1
+ local state_name=$2
+
+ [ ! -d ${BUILDDIR}/tstates/ ] && mkdir ${BUILDDIR}/tstates/
+ [ ! -d ${BUILDDIR}/rfiles/ ] && mkdir ${BUILDDIR}/rfiles/
+
+ cat << EOF > ${BUILDDIR}/tstates/${state_name}.sls
+
+test_${state_name}_rendering:
+ file.managed:
+ - name: ${BUILDDIR}/rfiles/${state_name}.conf
+ - template: jinja
+ - source: ${template_path}
+ - context:
+ _data: {{ pillar.get("_data", {}) }}
+EOF
}
setup_pillar() {
[ ! -d ${SALT_PILLAR_DIR} ] && mkdir -p ${SALT_PILLAR_DIR}
echo "base:" > ${SALT_PILLAR_DIR}/top.sls
- for pillar in ${PILLARDIR}/*; do
- grep ${FORMULA_NAME}: ${pillar} &>/dev/null || continue
- state_name=$(basename ${pillar%.sls})
- echo -e " ${state_name}:\n - ${state_name}" >> ${SALT_PILLAR_DIR}/top.sls
+ local sdir
+ local state_name
+ local template_name
+ local pillar_name
+
+ pushd ${PILLARDIR}/
+ for spath in $(find ./ -type f -name '*.sls'); do
+ pillar_name=$(basename $spath | sed -e 's/\.sls$//')
+ sdir=$(dirname $spath | sed -e 's/^.\///g')
+ template_name=$(echo $pillar_name | cut -d '-' -f 1)
+ state_name=$(echo ${sdir}_${pillar_name} | sed -e 's/\//_/g')
+ if ! echo $pillar_name | grep -q '-'; then
+ setup_test_state "salt://oslo_templates/files/$sdir/$template_name.conf" "$state_name"
+ fi
+ echo -e " ${state_name}:\n - ${sdir}/${pillar_name}" >> ${SALT_PILLAR_DIR}/top.sls
done
}
@@ -77,7 +84,6 @@
[ ! -d ${SALT_FILE_DIR} ] && mkdir -p ${SALT_FILE_DIR}
[ ! -d ${SALT_CONFIG_DIR} ] && mkdir -p ${SALT_CONFIG_DIR}
[ ! -d ${SALT_CACHE_DIR} ] && mkdir -p ${SALT_CACHE_DIR}
- [ ! -d ${SALT_CACHE_EXTMODS_DIR} ] && mkdir -p ${SALT_CACHE_EXTMODS_DIR}
echo "base:" > ${SALT_FILE_DIR}/top.sls
for pillar in ${PILLARDIR}/*.sls; do
@@ -89,7 +95,6 @@
cat << EOF > ${SALT_CONFIG_DIR}/minion
file_client: local
cachedir: ${SALT_CACHE_DIR}
-extension_modules: ${SALT_CACHE_EXTMODS_DIR}
verify_env: False
minion_id_caching: False
@@ -97,6 +102,8 @@
base:
- ${SALT_FILE_DIR}
- ${CURDIR}/..
+ - ${BUILDDIR}/tstates/
+ - /usr/share/salt-formulas/env
pillar_roots:
base:
@@ -106,14 +113,13 @@
}
fetch_dependency() {
- # example: fetch_dependency "linux:https://github.com/salt-formulas/salt-formula-linux"
dep_name="$(echo $1|cut -d : -f 1)"
dep_source="$(echo $1|cut -d : -f 2-)"
dep_root="${DEPSDIR}/$(basename $dep_source .git)"
dep_metadata="${dep_root}/metadata.yml"
- dependency_check git
- [ -d $dep_root ] && { log_info "Dependency $dep_name already fetched"; return 0; }
+ [ -d /usr/share/salt-formulas/env/${dep_name} ] && log_info "Dependency $dep_name already present in system-wide salt env" && return 0
+ [ -d $dep_root ] && log_info "Dependency $dep_name already fetched" && return 0
log_info "Fetching dependency $dep_name"
[ ! -d ${DEPSDIR} ] && mkdir -p ${DEPSDIR}
@@ -123,19 +129,6 @@
METADATA="${dep_metadata}" install_dependencies
}
-link_modules(){
- # Link modules *.py files to temporary salt-root
- local SALT_ROOT=${1:-$SALT_FILE_DIR}
- local SALT_ENV=${2:-$DEPSDIR}
-
- mkdir -p "${SALT_ROOT}/_modules/"
- # from git, development versions
- find ${SALT_ENV} -maxdepth 3 -mindepth 3 -path '*_modules*' -iname "*.py" -type f -print0 | while read -d $'\0' file; do
- ln -fs $(readlink -e ${file}) "$SALT_ROOT"/_modules/$(basename ${file}) ;
- done
- salt_run saltutil.sync_all
-}
-
install_dependencies() {
grep -E "^dependencies:" ${METADATA} >/dev/null || return 0
(python - | while read dep; do fetch_dependency "$dep"; done) << EOF
@@ -152,96 +145,36 @@
salt_run() {
[ -e ${VENV_DIR}/bin/activate ] && source ${VENV_DIR}/bin/activate
- python $(which salt-call) ${SALT_OPTS} $*
+ local cmd=''
+ cmd="python $(which salt-call) ${SALT_OPTS} $*"
+ log_info "$cmd"
+ $cmd
}
prepare() {
- if [[ -f ${BUILDDIR}/.prepare_done ]]; then
- log_info "${BUILDDIR}/.prepare_done exist, not rebuilding BUILDDIR"
- return
- fi
- [[ -d ${BUILDDIR} ]] && mkdir -p ${BUILDDIR}
+ [ ! -d ${BUILDDIR} ] && mkdir -p ${BUILDDIR}
- [[ ! -f "${VENV_DIR}/bin/activate" ]] && setup_virtualenv
- setup_mock_bin
+ which salt-call || setup_virtualenv
setup_pillar
setup_salt
install_dependencies
- link_modules
- touch ${BUILDDIR}/.prepare_done
-}
-
-lint_releasenotes() {
- [[ ! -f "${VENV_DIR}/bin/activate" ]] && setup_virtualenv
- source ${VENV_DIR}/bin/activate
- reno lint ${CURDIR}/../
-}
-
-lint() {
-# lint_releasenotes
- log_err "TODO: lint_releasenotes"
}
run() {
- for pillar in ${PILLARDIR}/*.sls; do
- grep ${FORMULA_NAME}: ${pillar} &>/dev/null || continue
- state_name=$(basename ${pillar%.sls})
- salt_run grains.set 'noservices' False force=True
-
- echo "Checking state ${FORMULA_NAME}.${state_name} ..."
- salt_run --id=${state_name} state.show_sls ${FORMULA_NAME} || (log_err "Execution of ${FORMULA_NAME}.${state_name} failed"; exit 1)
-
- # Check that all files in 'meta' folder can be rendered using any valid pillar
- for meta in `find ${FORMULA_META_DIR} -type f`; do
- meta_name=$(basename ${meta})
- echo "Checking meta ${meta_name} ..."
- salt_run --out=quiet --id=${state_name} cp.get_template ${meta} ${SALT_CACHE_DIR}/${meta_name} \
- || { log_err "Failed to render meta ${meta} using pillar ${FORMULA_NAME}.${state_name}"; exit 1; }
- cat ${SALT_CACHE_DIR}/${meta_name}
- done
+ pushd ${PILLARDIR}/
+ local sdir
+ local sname
+ local state_name
+ local pillar_name
+ for spath in $(find ./ -type f -name '*.sls'); do
+ sname=$(basename $spath | sed -e 's/\.sls$//')
+ sdir=$(dirname $spath | sed -e 's/^.\///')
+ state_name=$(echo ${sdir}_${sname} | sed -e 's/\//_/g' | cut -d '-' -f 1)
+ pillar_name=$(echo ${sdir}_${sname} | sed -e 's/\//_/g')
+ salt_run --id=${pillar_name} state.show_sls ${state_name} || (log_err "Show state ${state_name} with pillar ${pillar_name} failed"; exit 1)
+ salt_run --id=${pillar_name} state.sls ${state_name} || (log_err "Execution of ${state_name} with pillar ${pillar_name} failed"; exit 1)
done
-}
-
-real_run() {
- for pillar in ${PILLARDIR}/*.sls; do
- state_name=$(basename ${pillar%.sls})
- salt_run --id=${state_name} state.sls ${FORMULA_NAME} || { log_err "Execution of ${FORMULA_NAME}.${state_name} failed"; exit 1; }
- done
-}
-
-run_model_validate(){
- # Run modelschema.model_validate validation.
- # TEST iterateble, run for `each formula ROLE against each ROLE_PILLARNAME`
- # Pillars should be named in conviend ROLE_XXX.sls or ROLE.sls
- # Example:
- # client.sls client_auth.sls server.sls server_auth.sls
- if [ -d ${SCHEMARDIR} ]; then
- # model validator require py modules
- fetch_dependency "salt:https://github.com/salt-formulas/salt-formula-salt"
- link_modules
- salt_run saltutil.clear_cache; salt_run saltutil.refresh_pillar; salt_run saltutil.sync_all;
- for role in ${SCHEMARDIR}/*.yaml; do
- role_name=$(basename "${role%*.yaml}")
- for pillar in $(ls pillar/${role_name}*.sls | grep -v ${IGNORE_MODELVALIDATE_MASK} ); do
- pillar_name=$(basename "${pillar%*.sls}")
- local _message="FORMULA:${FORMULA_NAME} ROLE:${role_name} against PILLAR:${pillar_name}"
- log_info "model_validate ${_message}"
- # Rendered Example:
- # python $(which salt-call) --local -c /test1/maas/tests/build/salt --id=maas_cluster modelschema.model_validate maas cluster
- salt_run -m ${DEPSDIR}/salt-formula-salt --id=${pillar_name} modelschema.model_validate ${FORMULA_NAME} ${role_name} || { log_err "Execution of model_validate ${_message} failed"; exit 1 ; }
- done
- done
- else
- log_info "${SCHEMARDIR} not found!";
- fi
-}
-
-dependency_check() {
- local DEPENDENCY_COMMANDS=$*
-
- for DEPENDENCY_COMMAND in $DEPENDENCY_COMMANDS; do
- which $DEPENDENCY_COMMAND > /dev/null || ( log_err "Command \"$DEPENDENCY_COMMAND\" can not be found in default path."; exit 1; )
- done
+ popd
}
_atexit() {
@@ -257,32 +190,22 @@
}
## Main
-trap _atexit INT TERM EXIT
+[[ "$0" != "$BASH_SOURCE" ]] || {
+ trap _atexit INT TERM EXIT
-case $1 in
- clean)
- clean
- ;;
- prepare)
- prepare
- ;;
- lint)
- lint
- ;;
- run)
- run
- ;;
- real-run)
- real_run
- ;;
- model-validate)
- prepare
- run_model_validate
- ;;
- *)
- prepare
-# lint
- run
- run_model_validate
- ;;
-esac
+ case $1 in
+ clean)
+ clean
+ ;;
+ prepare)
+ prepare
+ ;;
+ run)
+ run
+ ;;
+ *)
+ prepare
+ run
+ ;;
+ esac
+}
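For reference, the rewritten test runner derives every name from the pillar path: each pillar under tests/pillar gets its own minion id, a '-' suffix (e.g. _rabbit-ssl) reuses the generated state of its base template, and setup_test_state() writes a file.managed wrapper that renders the matching template into ${BUILDDIR}/rfiles. A short Python sketch of that name mangling, mirroring the sed/cut pipeline above (illustrative only; the function name is made up):

    import os

    def test_names(rel_path):
        """Mirror the shell name mangling for one pillar under tests/pillar."""
        sdir = os.path.dirname(rel_path)                          # queens/oslo/messaging
        pillar_name = os.path.basename(rel_path)[:-len('.sls')]   # _rabbit-ssl
        pillar_id = ('%s_%s' % (sdir, pillar_name)).replace('/', '_')
        state_name = pillar_id.split('-')[0]                      # '-' variants reuse the base state
        template = 'salt://oslo_templates/files/%s/%s.conf' % (sdir, pillar_name.split('-')[0])
        return pillar_id, state_name, template

    print(test_names('queens/oslo/messaging/_rabbit-ssl.sls'))
    # ('queens_oslo_messaging__rabbit-ssl',
    #  'queens_oslo_messaging__rabbit',
    #  'salt://oslo_templates/files/queens/oslo/messaging/_rabbit.conf')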