Adjust SF for Queens release
Change-Id: I555ddafd90abd041b64c1ff8cc16b78af7d82fc3
diff --git a/nova/files/queens/api-paste.ini.Debian b/nova/files/queens/api-paste.ini.Debian
new file mode 100644
index 0000000..9da196f
--- /dev/null
+++ b/nova/files/queens/api-paste.ini.Debian
@@ -0,0 +1,93 @@
+############
+# Metadata #
+############
+{%- from "nova/map.jinja" import controller with context %}
+[composite:metadata]
+use = egg:Paste#urlmap
+/: meta
+
+[pipeline:meta]
+pipeline = cors metaapp
+
+[app:metaapp]
+paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
+
+#############
+# OpenStack #
+#############
+
+[composite:osapi_compute]
+use = call:nova.api.openstack.urlmap:urlmap_factory
+/: oscomputeversions
+# v21 is an exact feature match for v2, except it has more stringent
+# input validation on the wsgi surface (prevents fuzzing early on the
+# API). It also provides new features via API microversions which
+# clients can opt into. Unaware clients will receive the same frozen
+# v2 API feature set, but with some relaxed validation.
+/v2: openstack_compute_api_v21_legacy_v2_compatible
+/v2.1: openstack_compute_api_v21
+
+[composite:openstack_compute_api_v21]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth2 = cors http_proxy_to_wsgi compute_req_id faultwrap request_log sizelimit osprofiler noauth2 osapi_compute_app_v21
+keystone = cors http_proxy_to_wsgi compute_req_id faultwrap request_log sizelimit osprofiler authtoken keystonecontext {% if controller.audit.enabled %}audit {% endif %}osapi_compute_app_v21
+
+[composite:openstack_compute_api_v21_legacy_v2_compatible]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth2 = cors http_proxy_to_wsgi compute_req_id faultwrap request_log sizelimit osprofiler noauth2 legacy_v2_compatible osapi_compute_app_v21
+keystone = cors http_proxy_to_wsgi compute_req_id faultwrap request_log sizelimit osprofiler authtoken keystonecontext {% if controller.audit.enabled %}audit {% endif %}legacy_v2_compatible osapi_compute_app_v21
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware:RequestId.factory
+
+[filter:request_log]
+paste.filter_factory = nova.api.openstack.requestlog:RequestLog.factory
+
+[filter:compute_req_id]
+paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
+
+[filter:noauth2]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+
+[filter:osprofiler]
+paste.filter_factory = nova.profiler:WsgiMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = oslo_middleware:RequestBodySizeLimiter.factory
+
+[filter:http_proxy_to_wsgi]
+paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
+
+[filter:legacy_v2_compatible]
+paste.filter_factory = nova.api.openstack:LegacyV2CompatibleWrapper.factory
+
+[app:osapi_compute_app_v21]
+paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory
+
+[pipeline:oscomputeversions]
+pipeline = cors faultwrap request_log http_proxy_to_wsgi oscomputeversionapp
+
+[app:oscomputeversionapp]
+paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
+
+{%- if controller.audit.enabled %}
+[filter:audit]
+paste.filter_factory = {{ controller.get("audit", {}).get("filter_factory", "keystonemiddleware.audit:filter_factory") }}
+audit_map_file = {{ controller.get("audit", {}).get("map_file", "/etc/pycadf/nova_api_audit_map.conf") }}
+{%- endif %}
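+# Illustrative only: the audit block above expects pillar data exposed
+# by nova/map.jinja as "controller"; a minimal sketch of the assumed
+# pillar shape (keys taken from the template, nesting assumed) would be:
+#
+#   nova:
+#     controller:
+#       audit:
+#         enabled: true
+#         filter_factory: keystonemiddleware.audit:filter_factory
+#         map_file: /etc/pycadf/nova_api_audit_map.conf
+#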
+##########
+# Shared #
+##########
+
+[filter:cors]
+paste.filter_factory = oslo_middleware.cors:filter_factory
+oslo_config_project = nova
+
+[filter:keystonecontext]
+paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
diff --git a/nova/files/queens/api-paste.ini.RedHat b/nova/files/queens/api-paste.ini.RedHat
new file mode 120000
index 0000000..08fd76a
--- /dev/null
+++ b/nova/files/queens/api-paste.ini.RedHat
@@ -0,0 +1 @@
+api-paste.ini.Debian
\ No newline at end of file
diff --git a/nova/files/queens/libvirt.Debian b/nova/files/queens/libvirt.Debian
new file mode 100644
index 0000000..b80fab0
--- /dev/null
+++ b/nova/files/queens/libvirt.Debian
@@ -0,0 +1,19 @@
+# Defaults for libvirt-bin initscript (/etc/init.d/libvirt-bin)
+# This is a POSIX shell fragment
+
+# Start libvirtd to handle qemu/kvm:
+start_libvirtd="yes"
+
+# options passed to libvirtd, add "-l" to listen on tcp
+{%- if grains.get('init', None) != 'systemd' %}
+libvirtd_opts="-d -l"
+LIBVIRTD_ARGS="--listen"
+
+{%- else %}
+# Don't use "-d" option with systemd
+libvirtd_opts="-l"
+LIBVIRTD_ARGS="--listen"
+{%- endif %}
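+# For reference, the conditional above renders libvirtd_opts="-l" on
+# systemd hosts (systemd supervises the process itself, so no "-d") and
+# libvirtd_opts="-d -l" on non-systemd hosts.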
+# pass in location of kerberos keytab
+#export KRB5_KTNAME=/etc/libvirt/libvirt.keytab
+
diff --git a/nova/files/queens/libvirt.RedHat b/nova/files/queens/libvirt.RedHat
new file mode 120000
index 0000000..f8f6638
--- /dev/null
+++ b/nova/files/queens/libvirt.RedHat
@@ -0,0 +1 @@
+libvirt.Debian
\ No newline at end of file
diff --git a/nova/files/queens/libvirtd.conf.Debian b/nova/files/queens/libvirtd.conf.Debian
new file mode 100644
index 0000000..6f4afb9
--- /dev/null
+++ b/nova/files/queens/libvirtd.conf.Debian
@@ -0,0 +1,413 @@
+{%- from "nova/map.jinja" import compute with context %}
+# Master libvirt daemon configuration file
+#
+# For further information consult http://libvirt.org/format.html
+#
+# NOTE: the tests/daemon-conf regression test script requires
+# that each "PARAMETER = VALUE" line in this file have the parameter
+# name just after a leading "#".
+
+#################################################################
+#
+# Network connectivity controls
+#
+
+# Flag listening for secure TLS connections on the public TCP/IP port.
+# NB, must pass the --listen flag to the libvirtd process for this to
+# have any effect.
+#
+# It is necessary to set up a CA and issue server certificates before
+# using this capability.
+#
+# This is enabled by default, uncomment this to disable it
+#listen_tls = 0
+
+
+listen_tls = 0
+listen_tcp = 1
+auth_tcp = "none"
+
+
+# Listen for unencrypted TCP connections on the public TCP/IP port.
+# NB, must pass the --listen flag to the libvirtd process for this to
+# have any effect.
+#
+# Using the TCP socket requires SASL authentication by default. Only
+# SASL mechanisms which support data encryption are allowed. This is
+# DIGEST_MD5 and GSSAPI (Kerberos5)
+#
+# This is disabled by default, uncomment this to enable it.
+#listen_tcp = 1
+
+
+
+# Override the port for accepting secure TLS connections
+# This can be a port number, or service name
+#
+#tls_port = "16514"
+
+# Override the port for accepting insecure TCP connections
+# This can be a port number, or service name
+#
+#tcp_port = "16509"
+
+
+# Override the default configuration which binds to all network
+# interfaces. This can be a numeric IPv4/6 address, or hostname
+#
+#listen_addr = "192.168.0.1"
+
+
+# Flag toggling mDNS advertisement of the libvirt service.
+#
+# Alternatively can disable for all services on a host by
+# stopping the Avahi daemon
+#
+# This is disabled by default, uncomment this to enable it
+#mdns_adv = 1
+
+# Override the default mDNS advertisement name. This must be
+# unique on the immediate broadcast network.
+#
+# The default is "Virtualization Host HOSTNAME", where HOSTNAME
+# is substituted for the short hostname of the machine (without domain)
+#
+#mdns_name = "Virtualization Host Joe Demo"
+
+
+#################################################################
+#
+# UNIX socket access controls
+#
+
+# Set the UNIX domain socket group ownership. This can be used to
+# allow a 'trusted' set of users access to management capabilities
+# without becoming root.
+#
+# This is restricted to 'root' by default.
+unix_sock_group = "{{ compute.get('libvirt_service_group', 'libvirtd') }}"
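+# (Falls back to "libvirtd" unless the pillar supplies a
+# libvirt_service_group value under the compute key, e.g. "libvirt" on
+# distributions whose packages use that group name.)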
+
+# Set the UNIX socket permissions for the R/O socket. This is used
+# for monitoring VM status only
+#
+# Default allows any user. If setting group ownership, you may want to
+# restrict this to:
+#unix_sock_ro_perms = "0777"
+
+# Set the UNIX socket permissions for the R/W socket. This is used
+# for full management of VMs
+#
+# Default allows only root. If PolicyKit is enabled on the socket,
+# the default will change to allow everyone (eg, 0777)
+#
+# If not using PolicyKit and setting group ownership for access
+# control then you may want to relax this to:
+unix_sock_rw_perms = "0770"
+
+# Set the name of the directory in which sockets will be found/created.
+#unix_sock_dir = "/var/run/libvirt"
+
+#################################################################
+#
+# Authentication.
+#
+# - none: do not perform auth checks. If you can connect to the
+# socket you are allowed. This is suitable if there are
+# restrictions on connecting to the socket (eg, UNIX
+# socket permissions), or if there is a lower layer in
+# the network providing auth (eg, TLS/x509 certificates)
+#
+# - sasl: use SASL infrastructure. The actual auth scheme is then
+# controlled from /etc/sasl2/libvirt.conf. For the TCP
+# socket only GSSAPI & DIGEST-MD5 mechanisms will be used.
+# For non-TCP or TLS sockets, any scheme is allowed.
+#
+# - polkit: use PolicyKit to authenticate. This is only suitable
+# for use on the UNIX sockets. The default policy will
+# require a user to supply their own password to gain
+# full read/write access (aka sudo like), while anyone
+# is allowed read/only access.
+#
+# Set an authentication scheme for UNIX read-only sockets
+# By default socket permissions allow anyone to connect
+#
+# To restrict monitoring of domains you may wish to enable
+# an authentication mechanism here
+auth_unix_ro = "none"
+
+# Set an authentication scheme for UNIX read-write sockets
+# By default socket permissions only allow root. If PolicyKit
+# support was compiled into libvirt, the default will be to
+# use 'polkit' auth.
+#
+# If the unix_sock_rw_perms are changed you may wish to enable
+# an authentication mechanism here
+auth_unix_rw = "none"
+
+# Change the authentication scheme for TCP sockets.
+#
+# If you don't enable SASL, then all TCP traffic is cleartext.
+# Don't do this outside of a dev/test scenario. For real world
+# use, always enable SASL and use the GSSAPI or DIGEST-MD5
+# mechanism in /etc/sasl2/libvirt.conf
+#auth_tcp = "sasl"
+#auth_tcp = "none"
+
+# Change the authentication scheme for TLS sockets.
+#
+# TLS sockets already have encryption provided by the TLS
+# layer, and limited authentication is done by certificates
+#
+# It is possible to make use of any SASL authentication
+# mechanism as well, by using 'sasl' for this option
+#auth_tls = "none"
+
+
+
+#################################################################
+#
+# TLS x509 certificate configuration
+#
+
+
+# Override the default server key file path
+#
+#key_file = "/etc/pki/libvirt/private/serverkey.pem"
+
+# Override the default server certificate file path
+#
+#cert_file = "/etc/pki/libvirt/servercert.pem"
+
+# Override the default CA certificate path
+#
+#ca_file = "/etc/pki/CA/cacert.pem"
+
+# Specify a certificate revocation list.
+#
+# Defaults to not using a CRL, uncomment to enable it
+#crl_file = "/etc/pki/CA/crl.pem"
+
+
+
+#################################################################
+#
+# Authorization controls
+#
+
+
+# Flag to disable verification of our own server certificates
+#
+# When libvirtd starts it performs some sanity checks against
+# its own certificates.
+#
+# Default is to always run sanity checks. Uncommenting this
+# will disable sanity checks, which is not a good idea
+#tls_no_sanity_certificate = 1
+
+# Flag to disable verification of client certificates
+#
+# Client certificate verification is the primary authentication mechanism.
+# Any client which does not present a certificate signed by the CA
+# will be rejected.
+#
+# Default is to always verify. Uncommenting this will disable
+# verification - make sure an IP whitelist is set
+#tls_no_verify_certificate = 1
+
+
+# A whitelist of allowed x509 Distinguished Names
+# This list may contain wildcards such as
+#
+# "C=GB,ST=London,L=London,O=Red Hat,CN=*"
+#
+# See the POSIX fnmatch function for the format of the wildcards.
+#
+# NB If this is an empty list, no client can connect, so comment out
+# entirely rather than using empty list to disable these checks
+#
+# By default, no DNs are checked
+#tls_allowed_dn_list = ["DN1", "DN2"]
+
+
+# A whitelist of allowed SASL usernames. The format for usernames
+# depends on the SASL authentication mechanism. Kerberos usernames
+# look like username@REALM
+#
+# This list may contain wildcards such as
+#
+# "*@EXAMPLE.COM"
+#
+# See the POSIX fnmatch function for the format of the wildcards.
+#
+# NB If this is an empty list, no client can connect, so comment out
+# entirely rather than using empty list to disable these checks
+#
+# By default, no usernames are checked
+#sasl_allowed_username_list = ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM" ]
+
+
+
+#################################################################
+#
+# Processing controls
+#
+
+# The maximum number of concurrent client connections to allow
+# over all sockets combined.
+#max_clients = 20
+
+
+# The minimum limit sets the number of workers to start up
+# initially. If the number of active clients exceeds this,
+# then more threads are spawned, up to the max_workers limit.
+# Typically you'd want max_workers to equal the maximum number
+# of clients allowed
+#min_workers = 5
+#max_workers = 20
+
+
+# The number of priority workers. If all workers from the above
+# pool are stuck, some calls marked as high priority
+# (notably domainDestroy) can be executed in this pool.
+#prio_workers = 5
+
+# Total global limit on concurrent RPC calls. Should be
+# at least as large as max_workers. Beyond this, RPC requests
+# will be read into memory and queued. This directly impacts
+# memory usage; currently each request requires 256 KB of
+# memory, so by default up to 5 MB of memory is used.
+#
+# XXX this isn't actually enforced yet, only the per-client
+# limit is used so far
+#max_requests = 20
+
+# Limit on concurrent requests from a single client
+# connection. To avoid one client monopolizing the server
+# this should be a small fraction of the global max_requests
+# and max_workers parameter
+#max_client_requests = 5
+
+#################################################################
+#
+# Logging controls
+#
+
+# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
+# basically 1 will log everything possible
+#log_level = 3
+{%- if compute.libvirt.get('logging',{}).level is defined %}
+log_level = {{ compute.libvirt.logging.level }}
+{%- endif %}
+# Logging filters:
+# A filter allows selecting a different logging level for a given category
+# of logs
+# The format for a filter is one of:
+# x:name
+# x:+name
+# where name is a string which is matched against source file name,
+# e.g., "remote", "qemu", or "util/json", the optional "+" prefix
+# tells libvirt to log stack trace for each message matching name,
+# and x is the minimal level where matching messages should be logged:
+# 1: DEBUG
+# 2: INFO
+# 3: WARNING
+# 4: ERROR
+#
+# Multiple filters can be defined in a single @filters; they just need to be
+# separated by spaces.
+#
+# e.g. to only get warning or errors from the remote layer and only errors
+# from the event layer:
+#log_filters="3:remote 4:event"
+{%- if compute.libvirt.get('logging',{}).filters is defined %}
+log_filters={{ compute.libvirt.logging.filters|yaml_dquote }}
+{%- endif %}
+
+# Logging outputs:
+# An output is one of the places to save logging information
+# The format for an output can be:
+# x:stderr
+# output goes to stderr
+# x:syslog:name
+# use syslog for the output and use the given name as the ident
+# x:file:file_path
+# output to a file, with the given filepath
+# In all cases the x prefix is the minimal level, acting as a filter
+# 1: DEBUG
+# 2: INFO
+# 3: WARNING
+# 4: ERROR
+#
+# Multiple outputs can be defined; they just need to be separated by spaces.
+# e.g. to log all warnings and errors to syslog under the libvirtd ident:
+#log_outputs="3:syslog:libvirtd"
+#
+{%- if compute.libvirt.get('logging',{}).outputs is defined %}
+log_outputs={{ compute.libvirt.logging.outputs|yaml_dquote }}
+{%- endif %}
+
+# Log debug buffer size: default 64
+# The daemon keeps an internal debug log buffer which will be dumped in case
+# of crash or upon receiving a SIGUSR2 signal. This setting allows
+# overriding the default buffer size in kilobytes.
+# If value is 0 or less the debug log buffer is deactivated
+#log_buffer_size = 64
+{%- if compute.libvirt.get('logging',{}).buffer_size is defined %}
+log_buffer_size = {{ compute.libvirt.logging.buffer_size }}
+{%- endif %}
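+# Illustrative pillar sketch (nesting assumed from the "compute" import
+# above) driving the log_level/log_filters/log_outputs/log_buffer_size
+# blocks:
+#
+#   nova:
+#     compute:
+#       libvirt:
+#         logging:
+#           level: 3
+#           filters: '3:remote 4:event'
+#           outputs: '3:syslog:libvirtd'
+#           buffer_size: 64
+#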
+
+##################################################################
+#
+# Auditing
+#
+# This setting allows usage of the auditing subsystem to be altered:
+#
+# audit_level == 0 -> disable all auditing
+# audit_level == 1 -> enable auditing, only if enabled on host (default)
+# audit_level == 2 -> enable auditing, and exit if disabled on host
+#
+#audit_level = 2
+#
+# If set to 1, then audit messages will also be sent
+# via libvirt logging infrastructure. Defaults to 0
+#
+#audit_logging = 1
+
+###################################################################
+# UUID of the host:
+# Provide the UUID of the host here in case the command
+# 'dmidecode -s system-uuid' does not provide a valid uuid. In case
+# 'dmidecode' does not provide a valid UUID and none is provided here, a
+# temporary UUID will be generated.
+# Keep the format of the example UUID below. UUID must not have all digits
+# be the same.
+
+# NB This default all-zeros UUID will not work. Replace
+# it with the output of the 'uuidgen' command and then
+# uncomment this entry
+#host_uuid = "00000000-0000-0000-0000-000000000000"
+
+###################################################################
+# Keepalive protocol:
+# This allows libvirtd to detect broken client connections or even
+# dead clients. A keepalive message is sent to a client after
+# keepalive_interval seconds of inactivity to check if the client is
+# still responding; keepalive_count is a maximum number of keepalive
+# messages that are allowed to be sent to the client without getting
+# any response before the connection is considered broken. In other
+# words, the connection is automatically closed approximately after
+# keepalive_interval * (keepalive_count + 1) seconds since the last
+# message received from the client. If keepalive_interval is set to
+# -1, libvirtd will never send keepalive requests; however clients
+# can still send them and the daemon will send responses. When
+# keepalive_count is set to 0, connections will be automatically
+# closed after keepalive_interval seconds of inactivity without
+# sending any keepalive messages.
+#
+#keepalive_interval = 5
+#keepalive_count = 5
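+# (With the example values above, a silent client is dropped after
+# roughly 5 * (5 + 1) = 30 seconds.)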
+#
+# If set to 1, libvirtd will refuse to talk to clients that do not
+# support keepalive protocol. Defaults to 0.
+#
+#keepalive_required = 1
diff --git a/nova/files/queens/libvirtd.conf.RedHat b/nova/files/queens/libvirtd.conf.RedHat
new file mode 120000
index 0000000..2a7b101
--- /dev/null
+++ b/nova/files/queens/libvirtd.conf.RedHat
@@ -0,0 +1 @@
+libvirtd.conf.Debian
\ No newline at end of file
diff --git a/nova/files/queens/nova-compute.conf.Debian b/nova/files/queens/nova-compute.conf.Debian
new file mode 100644
index 0000000..a0bd6cb
--- /dev/null
+++ b/nova/files/queens/nova-compute.conf.Debian
@@ -0,0 +1,11242 @@
+{%- from "nova/map.jinja" import compute with context %}
+[DEFAULT]
+
+#
+# From nova.conf
+#
+compute_manager=nova.compute.manager.ComputeManager
+network_device_mtu=65000
+use_neutron = True
+security_group_api=neutron
+image_service=nova.image.glance.GlanceImageService
+
+#
+# Availability zone for internal services.
+#
+# This option determines the availability zone for the various
+# internal nova services, such as 'nova-scheduler', 'nova-conductor',
+# etc.
+#
+# Possible values:
+#
+# * Any string representing an existing availability zone name.
+# (string value)
+#internal_service_availability_zone = internal
+
+#
+# Default availability zone for compute services.
+#
+# This option determines the default availability zone for
+# 'nova-compute' services, which will be used if the service(s) do not
+# belong to aggregates with availability zone metadata.
+#
+# Possible values:
+#
+# * Any string representing an existing availability zone name.
+# (string value)
+#default_availability_zone = nova
+
+#
+# Default availability zone for instances.
+#
+# This option determines the default availability zone for instances,
+# which will be used when a user does not specify one when creating an
+# instance. The instance(s) will be bound to this availability zone for
+# their lifetime.
+#
+# Possible values:
+#
+# * Any string representing an existing availability zone name.
+# * None, which means that the instance can move from one availability
+# zone to another during its lifetime if it is moved from one compute
+# node to another.
+# (string value)
+#default_schedule_zone = <None>
+
+# Length of generated instance admin passwords. (integer value)
+# Minimum value: 0
+#password_length = 12
+
+#
+# Time period to generate instance usages for. It is possible to define
+# an optional offset to a given period by appending the @ character
+# followed by a number defining the offset.
+#
+# Possible values:
+#
+# * period, example: ``hour``, ``day``, ``month`` or ``year``
+# * period with offset, example: ``month@15`` will result in monthly
+# audits starting on the 15th day of the month.
+# (string value)
+#instance_usage_audit_period = month
+{% if pillar.ceilometer is defined %}
+instance_usage_audit = True
+instance_usage_audit_period = hour
+{%- endif %}
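+# (The block above is rendered only when a ceilometer pillar exists, so
+# hourly compute.instance.exists audit notifications are emitted for
+# Telemetry; otherwise the commented default applies.)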
+
+#
+# Start and use a daemon that can run the commands that need to be run
+# with root privileges. This option is usually enabled on nodes that
+# run nova-compute processes.
+# (boolean value)
+#use_rootwrap_daemon = false
+
+#
+# Path to the rootwrap configuration file.
+#
+# The goal of the root wrapper is to allow a service-specific
+# unprivileged user to run a number of actions as the root user in the
+# safest manner possible. The configuration file used here must match
+# the one defined in the sudoers entry.
+# (string value)
+rootwrap_config = /etc/nova/rootwrap.conf
+
+# Explicitly specify the temporary working directory. (string value)
+#tempdir = <None>
+
+# DEPRECATED:
+# Determine if monkey patching should be applied.
+#
+# Related options:
+#
+# * ``monkey_patch_modules``: This must have values set for this
+# option to
+# have any effect
+# (boolean value)
+# This option is deprecated for removal since 17.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# Monkey patching nova is not tested, not supported, and is a barrier
+# for interoperability.
+#monkey_patch = false
+
+# DEPRECATED:
+# List of modules/decorators to monkey patch.
+#
+# This option allows you to patch a decorator for all functions in
+# specified
+# modules.
+#
+# Possible values:
+#
+# * nova.compute.api:nova.notifications.notify_decorator
+# * [...]
+#
+# Related options:
+#
+# * ``monkey_patch``: This must be set to ``True`` for this option to
+# have any effect
+# (list value)
+# This option is deprecated for removal since 17.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# Monkey patching nova is not tested, not supported, and is a barrier
+# for interoperability.
+#monkey_patch_modules = nova.compute.api:nova.notifications.notify_decorator
+
+#
+# Defines which driver to use for controlling virtualization.
+#
+# Possible values:
+#
+# * ``libvirt.LibvirtDriver``
+# * ``xenapi.XenAPIDriver``
+# * ``fake.FakeDriver``
+# * ``ironic.IronicDriver``
+# * ``vmwareapi.VMwareVCDriver``
+# * ``hyperv.HyperVDriver``
+# * ``powervm.PowerVMDriver``
+# (string value)
+#compute_driver = <None>
+compute_driver = {{ compute.get('compute_driver', 'libvirt.LibvirtDriver') }}
+
+#
+# Allow destination machine to match source for resize. Useful when
+# testing in single-host environments. By default it is not allowed
+# to resize to the same host. Setting this option to true will add
+# the same host to the destination options. Also set to true
+# if you allow the ServerGroupAffinityFilter and need to resize.
+# (boolean value)
+#allow_resize_to_same_host = false
+allow_resize_to_same_host = true
+
+#
+# Image properties that should not be inherited from the instance
+# when taking a snapshot.
+#
+# This option gives an opportunity to select which image-properties
+# should not be inherited by newly created snapshots.
+#
+# Possible values:
+#
+# * A comma-separated list whose items are image properties. Usually
+# only the image properties that are only needed by base images can
+# be included here, since the snapshots that are created from the
+# base images don't need them.
+# * Default list: cache_in_nova, bittorrent,
+# img_signature_hash_method,
+# img_signature, img_signature_key_type,
+# img_signature_certificate_uuid
+#
+# (list value)
+#non_inheritable_image_properties = cache_in_nova,bittorrent,img_signature_hash_method,img_signature,img_signature_key_type,img_signature_certificate_uuid
+
+# DEPRECATED:
+# When creating multiple instances with a single request using the
+# os-multiple-create API extension, this template will be used to build
+# the display name for each instance. The benefit is that the instances
+# end up with different hostnames. Example display names when creating
+# two VMs: name-1, name-2.
+#
+# Possible values:
+#
+# * Valid keys for the template are: name, uuid, count.
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# This config changes API behaviour. All changes in API behaviour
+# should be
+# discoverable.
+#multi_instance_display_name_template = %(name)s-%(count)d
+
+#
+# Maximum number of devices that will result in a local image being
+# created on the hypervisor node.
+#
+# A negative number means unlimited. Setting max_local_block_devices
+# to 0 means that any request that attempts to create a local disk
+# will fail. This option is meant to limit the number of local disks
+# (so the root local disk that is the result of --image being used, and
+# any other ephemeral and swap disks). 0 does not mean that images
+# will be automatically converted to volumes and boot instances from
+# volumes - it just means that all requests that attempt to create a
+# local disk will fail.
+#
+# Possible values:
+#
+# * 0: Creating a local disk is not allowed.
+# * Negative number: Allows an unlimited number of local disks.
+# * Positive number: Allows only that many local disks.
+# (Default value is 3.)
+# (integer value)
+#max_local_block_devices = 3
+
+#
+# A comma-separated list of monitors that can be used for getting
+# compute metrics. You can use the alias/name from the setuptools
+# entry points for nova.compute.monitors.* namespaces. If no
+# namespace is supplied, the "cpu." namespace is assumed for
+# backwards-compatibility.
+#
+# NOTE: Only one monitor per namespace (For example: cpu) can be
+# loaded at
+# a time.
+#
+# Possible values:
+#
+# * An empty list will disable the feature (Default).
+# * An example value that would enable both the CPU and NUMA memory
+# bandwidth monitors that use the virt driver variant:
+#
+# compute_monitors = cpu.virt_driver, numa_mem_bw.virt_driver
+# (list value)
+#compute_monitors =
+
+#
+# The default format that an ephemeral volume will be formatted with on
+# creation.
+#
+# Possible values:
+#
+# * ``ext2``
+# * ``ext3``
+# * ``ext4``
+# * ``xfs``
+# * ``ntfs`` (only for Windows guests)
+# (string value)
+#default_ephemeral_format = <None>
+
+#
+# Determine if instance should boot or fail on VIF plugging timeout.
+#
+# Nova sends a port update to Neutron after an instance has been
+# scheduled, providing Neutron with the necessary information to finish
+# setup of the port. Once completed, Neutron notifies Nova that it has
+# finished setting up the port, at which point Nova resumes the boot of
+# the instance since network connectivity is now supposed to be
+# present. A timeout will occur if the reply is not received after a
+# given interval.
+#
+# This option determines what Nova does when the VIF plugging timeout
+# event happens. When enabled, the instance will error out. When
+# disabled, the instance will continue to boot on the assumption that
+# the port is ready.
+#
+# Possible values:
+#
+# * True: Instances should fail after VIF plugging timeout
+# * False: Instances should continue booting after VIF plugging
+# timeout
+# (boolean value)
+vif_plugging_is_fatal = {{ compute.get('vif_plugging_is_fatal', 'true') }}
+
+#
+# Timeout for Neutron VIF plugging event message arrival.
+#
+# Number of seconds to wait for Neutron vif plugging events to
+# arrive before continuing or failing (see 'vif_plugging_is_fatal').
+#
+# Related options:
+#
+# * vif_plugging_is_fatal - If ``vif_plugging_timeout`` is set to zero
+# and ``vif_plugging_is_fatal`` is False, events should not be
+# expected to arrive at all.
+# (integer value)
+# Minimum value: 0
+vif_plugging_timeout = {{ compute.get('vif_plugging_timeout', '300') }}
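+# Illustrative pillar sketch (nesting assumed) overriding the two VIF
+# plugging options above, e.g. for a lab where Neutron events are known
+# to be unreliable:
+#
+#   nova:
+#     compute:
+#       vif_plugging_is_fatal: false
+#       vif_plugging_timeout: 0
+#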
+
+# Path to '/etc/network/interfaces' template.
+#
+# The path to a template file for the '/etc/network/interfaces'-style
+# file, which will be populated by nova and subsequently used by
+# cloudinit. This provides a method to configure network connectivity
+# in environments without a DHCP server.
+#
+# The template will be rendered using the Jinja2 template engine, and
+# receive a top-level key called ``interfaces``. This key will contain
+# a list of dictionaries, one for each interface.
+#
+# Refer to the cloudinit documentation for more information:
+#
+# https://cloudinit.readthedocs.io/en/latest/topics/datasources.html
+#
+# Possible values:
+#
+# * A path to a Jinja2-formatted template for a Debian
+# '/etc/network/interfaces' file. This applies even if using a
+# non-Debian-derived guest.
+#
+# Related options:
+#
+# * ``flat_inject``: This must be set to ``True`` to ensure nova embeds
+# network configuration information in the metadata provided through
+# the config drive.
+# (string value)
+#injected_network_template = $pybasedir/nova/virt/interfaces.template
+
+#
+# The image preallocation mode to use.
+#
+# Image preallocation allows storage for instance images to be
+# allocated up front when the instance is initially provisioned. This
+# ensures immediate feedback is given if enough space isn't available.
+# In addition, it should significantly improve performance on writes to
+# new blocks and may even improve I/O performance to prewritten blocks
+# due to reduced fragmentation.
+#
+# Possible values:
+#
+# * "none" => no storage provisioning is done up front
+# * "space" => storage is fully allocated at instance start
+# (string value)
+# Possible values:
+# none - <No description provided>
+# space - <No description provided>
+#preallocate_images = none
+
+#
+# Enable use of copy-on-write (cow) images.
+#
+# QEMU/KVM allow the use of qcow2 as backing files. By disabling this,
+# backing files will not be used.
+# (boolean value)
+#use_cow_images = true
+{%- if compute.image is defined and compute.image.use_cow is defined %}
+use_cow_images = {{ compute.image.use_cow }}
+{%- endif %}
+
+#
+# Force conversion of backing images to raw format.
+#
+# Possible values:
+#
+# * True: Backing image files will be converted to raw image format
+# * False: Backing image files will not be converted
+#
+# Related options:
+#
+# * ``compute_driver``: Only the libvirt driver uses this option.
+# (boolean value)
+#force_raw_images = true
+force_raw_images={{ compute.get('image', {}).get('force_raw', True)|lower }}
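+# (The |lower filter renders the boolean pillar value True/False as the
+# lowercase true/false form used throughout nova's sample configs.)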
+
+#
+# Name of the mkfs commands for ephemeral device.
+#
+# The format is <os_type>=<mkfs command>
+# (multi valued)
+#virt_mkfs =
+
+#
+# Enable resizing of filesystems via a block device.
+#
+# If enabled, attempt to resize the filesystem by accessing the image
+# over a block device. This is done by the host and may not be
+# necessary if the image contains a recent version of cloud-init.
+# Possible mechanisms require the nbd driver (for qcow and raw), or
+# loop (for raw).
+# (boolean value)
+#resize_fs_using_block_device = false
+
+# Amount of time, in seconds, to wait for NBD device start up.
+# (integer value)
+# Minimum value: 0
+#timeout_nbd = 10
+
+#
+# Location of cached images.
+#
+# This is NOT the full path - just a folder name relative to
+# '$instances_path'.
+# For per-compute-host cached images, set to '_base_$my_ip'
+# (string value)
+#image_cache_subdirectory_name = _base
+
+# Should unused base images be removed? (boolean value)
+#remove_unused_base_images = true
+
+#
+# Unused unresized base images younger than this will not be removed.
+# (integer value)
+remove_unused_original_minimum_age_seconds = 86400
+
+#
+# Generic property to specify the pointer type.
+#
+# Input devices allow interaction with a graphical framebuffer. For
+# example to provide a graphic tablet for absolute cursor movement.
+#
+# If set, the 'hw_pointer_model' image property takes precedence over
+# this configuration option.
+#
+# Possible values:
+#
+# * None: Uses default behavior provided by drivers (mouse on PS2 for
+# libvirt x86)
+# * ps2mouse: Uses relative movement. Mouse connected by PS2
+# * usbtablet: Uses absolute movement. Tablet connect by USB
+#
+# Related options:
+#
+# * usbtablet must be configured with VNC enabled or SPICE enabled and
+# SPICE
+# agent disabled. When used with libvirt the instance mode should be
+# configured as HVM.
+# (string value)
+# Possible values:
+# <None> - <No description provided>
+# ps2mouse - <No description provided>
+# usbtablet - <No description provided>
+#pointer_model = usbtablet
+
+#
+# Defines which physical CPUs (pCPUs) can be used by instance
+# virtual CPUs (vCPUs).
+#
+# Possible values:
+#
+# * A comma-separated list of physical CPU numbers that virtual CPUs
+# can be
+# allocated to by default. Each element should be either a single
+# CPU number,
+# a range of CPU numbers, or a caret followed by a CPU number to be
+# excluded from a previous range. For example:
+#
+# vcpu_pin_set = "4-12,^8,15"
+# (string value)
+#vcpu_pin_set = <None>
+{%- if compute.vcpu_pin_set is defined %}
+vcpu_pin_set={{ compute.vcpu_pin_set }}
+{%- endif %}
+
+#
+# Number of huge/large memory pages to reserve per NUMA host cell.
+#
+# Possible values:
+#
+# * A list of valid key=value which reflect NUMA node ID, page size
+# (Default unit is KiB) and number of pages to be reserved.
+#
+# reserved_huge_pages = node:0,size:2048,count:64
+# reserved_huge_pages = node:1,size:1GB,count:1
+#
+# In this example we are reserving 64 pages of 2MiB on NUMA node 0
+# and 1 page of 1GiB on NUMA node 1.
+# (dict value)
+#reserved_huge_pages = <None>
+
+#
+# Amount of disk resources in MB to always keep available to the host.
+# The disk usage gets reported back to the scheduler from nova-compute
+# running on the compute nodes. To prevent the disk resources from
+# being considered as available, this option can be used to reserve
+# disk space for that host.
+#
+# Possible values:
+#
+# * Any positive integer representing amount of disk in MB to reserve
+# for the host.
+# (integer value)
+# Minimum value: 0
+#reserved_host_disk_mb = 0
+
+#
+# Amount of memory in MB to reserve for the host so that it is always
+# available to host processes. The host resources usage is reported
+# back to the scheduler continuously from nova-compute running on the
+# compute node. To prevent the host memory from being considered as
+# available, this option is used to reserve memory for the host.
+#
+# Possible values:
+#
+# * Any positive integer representing amount of memory in MB to
+# reserve
+# for the host.
+# (integer value)
+# Minimum value: 0
+#reserved_host_memory_mb = 512
+reserved_host_memory_mb = {{ compute.get('reserved_host_memory_mb', '512') }}
+
+#
+# Number of physical CPUs to reserve for the host. The host resources
+# usage is reported back to the scheduler continuously from
+# nova-compute running on the compute node. To prevent the host CPU
+# from being considered as available, this option is used to reserve
+# random pCPU(s) for the host.
+#
+# Possible values:
+#
+# * Any positive integer representing number of physical CPUs to
+# reserve
+# for the host.
+# (integer value)
+# Minimum value: 0
+#reserved_host_cpus = 0
+
+#
+# This option helps you specify virtual CPU to physical CPU allocation
+# ratio.
+#
+# From Ocata (15.0.0) this is used to influence the hosts selected by
+# the Placement API. Note that when Placement is used, the CoreFilter
+# is redundant, because the Placement API will have already filtered
+# out hosts that would have failed the CoreFilter.
+#
+# This configuration specifies ratio for CoreFilter which can be set
+# per compute node. For AggregateCoreFilter, it will fall back to this
+# configuration value if no per-aggregate setting is found.
+#
+# NOTE: This can be set per-compute, or if set to 0.0, the value
+# set on the scheduler node(s) or compute node(s) will be used
+# and defaulted to 16.0.
+#
+# NOTE: As of the 16.0.0 Pike release, this configuration option is
+# ignored
+# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
+#
+# Possible values:
+#
+# * Any valid positive integer or float value
+# (floating point value)
+# Minimum value: 0
+#cpu_allocation_ratio = 0.0
+{%- if compute.cpu_allocation_ratio is defined %}
+cpu_allocation_ratio = {{ compute.cpu_allocation_ratio }}
+{%- else %}
+#cpu_allocation_ratio=0.0
+{%- endif %}
+
+#
+# This option helps you specify virtual RAM to physical RAM
+# allocation ratio.
+#
+# From Ocata (15.0.0) this is used to influence the hosts selected by
+# the Placement API. Note that when Placement is used, the RamFilter
+# is redundant, because the Placement API will have already filtered
+# out hosts that would have failed the RamFilter.
+#
+# This configuration specifies ratio for RamFilter which can be set
+# per compute node. For AggregateRamFilter, it will fall back to this
+# configuration value if no per-aggregate setting is found.
+#
+# NOTE: This can be set per-compute, or if set to 0.0, the value
+# set on the scheduler node(s) or compute node(s) will be used and
+# defaulted to 1.5.
+#
+# NOTE: As of the 16.0.0 Pike release, this configuration option is
+# ignored
+# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
+#
+# Possible values:
+#
+# * Any valid positive integer or float value
+# (floating point value)
+# Minimum value: 0
+#ram_allocation_ratio = 0.0
+{%- if compute.ram_allocation_ratio is defined %}
+ram_allocation_ratio = {{ compute.ram_allocation_ratio }}
+{%- else %}
+#ram_allocation_ratio=0.0
+{%- endif %}
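+# Illustrative pillar sketch (nesting assumed) pinning both allocation
+# ratios rendered above to their documented defaults:
+#
+#   nova:
+#     compute:
+#       cpu_allocation_ratio: 16.0
+#       ram_allocation_ratio: 1.5
+#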
+
+#
+# This option helps you specify virtual disk to physical disk
+# allocation ratio.
+#
+# From Ocata (15.0.0) this is used to influence the hosts selected by
+# the Placement API. Note that when Placement is used, the DiskFilter
+# is redundant, because the Placement API will have already filtered
+# out hosts that would have failed the DiskFilter.
+#
+# A ratio greater than 1.0 will result in over-subscription of the
+# available physical disk, which can be useful for more
+# efficiently packing instances created with images that do not
+# use the entire virtual disk, such as sparse or compressed
+# images. It can be set to a value between 0.0 and 1.0 in order
+# to preserve a percentage of the disk for uses other than
+# instances.
+#
+# NOTE: This can be set per-compute, or if set to 0.0, the value
+# set on the scheduler node(s) or compute node(s) will be used and
+# defaulted to 1.0.
+#
+# NOTE: As of the 16.0.0 Pike release, this configuration option is
+# ignored
+# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
+#
+# Possible values:
+#
+# * Any valid positive integer or float value
+# (floating point value)
+# Minimum value: 0
+#disk_allocation_ratio = 0.0
+
+#
+# Console proxy host to be used to connect to instances on this host.
+# It is the
+# publicly visible name for the console host.
+#
+# Possible values:
+#
+# * Current hostname (default) or any string representing hostname.
+# (string value)
+#console_host = <current_hostname>
+
+#
+# Name of the network to be used to set access IPs for instances. If
+# there are
+# multiple IPs to choose from, an arbitrary one will be chosen.
+#
+# Possible values:
+#
+# * None (default)
+# * Any string representing network name.
+# (string value)
+#default_access_ip_network_name = <None>
+
+#
+# Whether to batch up the application of IPTables rules during a host
+# restart
+# and apply all at the end of the init phase.
+# (boolean value)
+#defer_iptables_apply = false
+
+#
+# Specifies where instances are stored on the hypervisor's disk.
+# It can point to locally attached storage or a directory on NFS.
+#
+# Possible values:
+#
+# * $state_path/instances where state_path is a config option that
+# specifies the top-level directory for maintaining nova's state
+# (default), or any string representing a directory path.
+# (string value)
+instances_path = {{ compute.instances_path }}
+
+#
+# This option enables periodic compute.instance.exists notifications.
+# Each
+# compute node must be configured to generate system usage data. These
+# notifications are consumed by OpenStack Telemetry service.
+# (boolean value)
+#instance_usage_audit = false
+
+#
+# Maximum number of 1-second retries in live_migration. It specifies
+# the number of retries to iptables when it complains. It happens when
+# a user continuously sends live-migration requests to the same host,
+# leading to concurrent requests to iptables.
+#
+# Possible values:
+#
+# * Any positive integer representing retry count.
+# (integer value)
+# Minimum value: 0
+#live_migration_retry_count = 30
+
+#
+# This option specifies whether to start guests that were running
+# before the host rebooted. It ensures that all of the instances on a
+# Nova compute node resume their state each time the compute node boots
+# or restarts.
+# (boolean value)
+resume_guests_state_on_host_boot = {{ compute.get('resume_guests_state_on_host_boot', True) }}
+
+#
+# Number of times to retry network allocation. It is required to
+# attempt network
+# allocation retries if the virtual interface plug fails.
+#
+# Possible values:
+#
+# * Any positive integer representing retry count.
+# (integer value)
+# Minimum value: 0
+#network_allocate_retries = 0
+
+#
+# Limits the maximum number of instance builds to run concurrently by
+# nova-compute. The compute service can attempt to build an infinite
+# number of instances, if asked to do so. This limit is enforced to
+# avoid building an unlimited number of instances concurrently on a
+# compute node. This value can be set per compute node.
+#
+# Possible Values:
+#
+# * 0 : treated as unlimited.
+# * Any positive integer representing maximum concurrent builds.
+# (integer value)
+# Minimum value: 0
+#max_concurrent_builds = 10
+
+#
+# Maximum number of live migrations to run concurrently. This limit is
+# enforced to avoid outbound live migrations overwhelming the
+# host/network and causing failures. It is not recommended that you
+# change this unless you are very sure that doing so is safe and stable
+# in your environment.
+#
+# Possible values:
+#
+# * 0 : treated as unlimited.
+# * Negative value defaults to 0.
+# * Any positive integer representing maximum number of live
+# migrations
+# to run concurrently.
+# (integer value)
+#max_concurrent_live_migrations = 1
+{%- if compute.max_concurrent_live_migrations is defined %}
+max_concurrent_live_migrations = {{ compute.max_concurrent_live_migrations }}
+{%- endif %}
+
+#
+# Number of times to retry block device allocation on failures.
+# Starting with Liberty, Cinder can use an image volume cache. This may
+# help with block device allocation performance. Look at the cinder
+# image_volume_cache_enabled configuration option.
+#
+# Possible values:
+#
+# * 60 (default)
+# * If value is 0, then one attempt is made.
+# * Any negative value is treated as 0.
+# * For any value > 0, total attempts are (value + 1)
+# (integer value)
+block_device_allocate_retries = {{ compute.get('block_device_allocate_retries', '600') }}
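+# (Per the formula above, the template default of 600 retries yields up
+# to 601 allocation attempts, spaced by
+# block_device_allocate_retries_interval below.)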
+
+#
+# Number of greenthreads available for use to sync power states.
+#
+# This option can be used to reduce the number of concurrent requests
+# made to the hypervisor or system with real instance power states
+# for performance reasons, for example, with Ironic.
+#
+# Possible values:
+#
+# * Any positive integer representing greenthreads count.
+# (integer value)
+#sync_power_state_pool_size = 1000
+
+#
+# Number of seconds to wait between runs of the image cache manager.
+#
+# Possible values:
+# * 0: run at the default rate.
+# * -1: disable
+# * Any other value
+# (integer value)
+# Minimum value: -1
+image_cache_manager_interval = 0
+
+#
+# Interval to pull network bandwidth usage info.
+#
+# Not supported on all hypervisors. If a hypervisor doesn't support
+# bandwidth
+# usage, it will not get the info in the usage events.
+#
+# Possible values:
+#
+# * 0: Will run at the default periodic interval.
+# * Any value < 0: Disables the option.
+# * Any positive integer in seconds.
+# (integer value)
+#bandwidth_poll_interval = 600
+
+#
+# Interval to sync power states between the database and the
+# hypervisor.
+#
+# The interval that Nova checks the actual virtual machine power state
+# and the power state that Nova has in its database. If a user powers
+# down their VM, Nova updates the API to report the VM has been
+# powered down. Should something turn on the VM unexpectedly,
+# Nova will turn the VM back off to keep the system in the expected
+# state.
+#
+# Possible values:
+#
+# * 0: Will run at the default periodic interval.
+# * Any value < 0: Disables the option.
+# * Any positive integer in seconds.
+#
+# Related options:
+#
+# * If ``handle_virt_lifecycle_events`` in workarounds_group is
+# false and this option is negative, then instances that get out
+# of sync between the hypervisor and the Nova database will have
+# to be synchronized manually.
+# (integer value)
+#sync_power_state_interval = 600
+
+#
+# Interval between instance network information cache updates.
+#
+# Number of seconds after which each compute node runs the task of
+# querying Neutron for all of its instances' networking information,
+# then updates the Nova db with that information. Nova will never
+# update its cache if this option is set to 0. If we don't update the
+# cache, the metadata service and nova-api endpoints will be proxying
+# incorrect network data about the instance. So, it is not recommended
+# to set this option to 0.
+#
+# Possible values:
+#
+# * Any positive integer in seconds.
+# * Any value <=0 will disable the sync. This is not recommended.
+# (integer value)
+#heal_instance_info_cache_interval = 60
+heal_instance_info_cache_interval = {{ compute.heal_instance_info_cache_interval }}
+
+#
+# Interval for reclaiming deleted instances.
+#
+# A value greater than 0 will enable SOFT_DELETE of instances.
+# This option decides whether the server to be deleted will be put into
+# the SOFT_DELETED state. If this value is greater than 0, the deleted
+# server will not be deleted immediately; instead it will be put into
+# a queue until it's too old (deleted time greater than the value of
+# reclaim_instance_interval). The server can be recovered from the
+# delete queue by using the restore action. If the deleted server
+# remains longer than the value of reclaim_instance_interval, it will
+# be deleted by a periodic task in the compute service automatically.
+#
+# Note that this option is read from both the API and compute nodes,
+# and must be set globally, otherwise servers could be put into a
+# soft-deleted state in the API and never actually reclaimed (deleted)
+# on the compute node.
+#
+# Possible values:
+#
+# * Any positive integer(in seconds) greater than 0 will enable
+# this option.
+# * Any value <=0 will disable the option.
+# (integer value)
+#reclaim_instance_interval = 0
+
+#
+# Interval for gathering volume usages.
+#
+# This option updates the volume usage cache for every
+# volume_usage_poll_interval number of seconds.
+#
+# Possible values:
+#
+# * Any positive integer(in seconds) greater than 0 will enable
+# this option.
+# * Any value <=0 will disable the option.
+# (integer value)
+#volume_usage_poll_interval = 0
+
+#
+# Interval for polling shelved instances to offload.
+#
+# The periodic task runs every shelved_poll_interval seconds and checks
+# if there are any shelved instances. If it finds a shelved instance,
+# it offloads it based on the 'shelved_offload_time' config value.
+# Check the 'shelved_offload_time' config option description for
+# details.
+#
+# Possible values:
+#
+# * Any value <= 0: Disables the option.
+# * Any positive integer in seconds.
+#
+# Related options:
+#
+# * ``shelved_offload_time``
+# (integer value)
+#shelved_poll_interval = 3600
+
+#
+# Time before a shelved instance is eligible for removal from a host.
+#
+# By default this option is set to 0 and the shelved instance will be
+# removed from the hypervisor immediately after shelve operation.
+# Otherwise, the instance will be kept for the value of
+# shelved_offload_time (in seconds) so that during that time period the
+# unshelve action will be faster; then the periodic task will remove
+# the instance from the hypervisor after shelved_offload_time passes.
+#
+# Possible values:
+#
+# * 0: Instance will be immediately offloaded after being
+# shelved.
+# * Any value < 0: An instance will never offload.
+# * Any positive integer in seconds: The instance will exist for
+# the specified number of seconds before being offloaded.
+# (integer value)
+#shelved_offload_time = 0
+
+#
+# Interval for retrying failed instance file deletes.
+#
+# This option depends on 'maximum_instance_delete_attempts'.
+# This option specifies how often to retry deletes whereas
+# 'maximum_instance_delete_attempts' specifies the maximum number
+# of retry attempts that can be made.
+#
+# Possible values:
+#
+# * 0: Will run at the default periodic interval.
+# * Any value < 0: Disables the option.
+# * Any positive integer in seconds.
+#
+# Related options:
+#
+# * ``maximum_instance_delete_attempts`` from instance_cleaning_opts
+# group.
+# (integer value)
+#instance_delete_interval = 300
+
+#
+# Interval (in seconds) between block device allocation retries on
+# failures.
+#
+# This option allows the user to specify the time interval between
+# consecutive retries. The 'block_device_allocate_retries' option
+# specifies the maximum number of retries.
+#
+# Possible values:
+#
+# * 0: Disables the option.
+# * Any positive integer in seconds enables the option.
+#
+# Related options:
+#
+# * ``block_device_allocate_retries`` in compute_manager_opts group.
+# (integer value)
+# Minimum value: 0
+block_device_allocate_retries_interval = {{ compute.get('block_device_allocate_retries_interval', '10') }}
+
+#
+# Interval between sending the scheduler a list of current instance
+# UUIDs to
+# verify that its view of instances is in sync with nova.
+#
+# If the CONF option 'scheduler_tracks_instance_changes' is
+# False, the sync calls will not be made. So, changing this option
+# will have no effect.
+#
+# If the out of sync situations are not very common, this interval
+# can be increased to lower the number of RPC messages being sent.
+# Likewise, if sync issues turn out to be a problem, the interval
+# can be lowered to check more frequently.
+#
+# Possible values:
+#
+# * 0: Will run at the default periodic interval.
+# * Any value < 0: Disables the option.
+# * Any positive integer in seconds.
+#
+# Related options:
+#
+# * This option has no impact if ``scheduler_tracks_instance_changes``
+# is set to False.
+# (integer value)
+#scheduler_instance_sync_interval = 120
+
+#
+# Interval for updating compute resources.
+#
+# This option specifies how often the update_available_resources
+# periodic task should run. A number less than 0 means to disable the
+# task completely. Leaving this at the default of 0 will cause this to
+# run at the default periodic interval. Setting it to any positive
+# value will cause it to run at approximately that number of seconds.
+#
+# Possible values:
+#
+# * 0: Will run at the default periodic interval.
+# * Any value < 0: Disables the option.
+# * Any positive integer in seconds.
+# (integer value)
+#update_resources_interval = 0
+
+#
+# Time interval after which an instance is hard rebooted
+# automatically.
+#
+# When doing a soft reboot, it is possible that a guest kernel is
+# completely hung in a way that causes the soft reboot task
+# to not ever finish. Setting this option to a time period in seconds
+# will automatically hard reboot an instance if it has been stuck
+# in a rebooting state longer than N seconds.
+#
+# Possible values:
+#
+# * 0: Disables the option (default).
+# * Any positive integer in seconds: Enables the option.
+# (integer value)
+# Minimum value: 0
+#reboot_timeout = 0
+
+#
+# Maximum time in seconds that an instance can take to build.
+#
+# If this timer expires, instance status will be changed to ERROR.
+# Enabling this option will make sure an instance will not be stuck
+# in BUILD state for a longer period.
+#
+# Possible values:
+#
+# * 0: Disables the option (default)
+# * Any positive integer in seconds: Enables the option.
+# (integer value)
+# Minimum value: 0
+#instance_build_timeout = 0
+
+#
+# Interval to wait before un-rescuing an instance stuck in RESCUE.
+#
+# Possible values:
+#
+# * 0: Disables the option (default)
+# * Any positive integer in seconds: Enables the option.
+# (integer value)
+# Minimum value: 0
+#rescue_timeout = 0
+
+#
+# Automatically confirm resizes after N seconds.
+#
+# Resize functionality will save the existing server before resizing.
+# After the resize completes, user is requested to confirm the resize.
+# The user has the opportunity to either confirm or revert all
+# changes. Confirm resize removes the original server and changes
+# server status from resized to active. Setting this option to a time
+# period (in seconds) will automatically confirm the resize if the
+# server is in resized state longer than that time.
+#
+# Possible values:
+#
+# * 0: Disables the option (default)
+# * Any positive integer in seconds: Enables the option.
+# (integer value)
+# Minimum value: 0
+#resize_confirm_window = 0
+
+#
+# Total time to wait in seconds for an instance to perform a clean
+# shutdown.
+#
+# It determines the overall period (in seconds) a VM is allowed to
+# perform a clean shutdown. While performing stop, rescue, shelve, and
+# rebuild operations, configuring this option gives the VM a chance
+# to perform a controlled shutdown before the instance is powered off.
+# The default timeout is 60 seconds.
+#
+# The timeout value can be overridden on a per image basis by means
+# of os_shutdown_timeout that is an image metadata setting allowing
+# different types of operating systems to specify how much time they
+# need to shut down cleanly.
+#
+# Possible values:
+#
+# * Any positive integer in seconds (default value is 60).
+# (integer value)
+# Minimum value: 1
+#shutdown_timeout = 60
+
+#
+# The compute service periodically checks for instances that have been
+# deleted in the database but remain running on the compute node. The
+# above option enables action to be taken when such instances are
+# identified.
+#
+# Possible values:
+#
+# * reap: Powers down the instances and deletes them (default)
+# * log: Logs a warning message about deletion of the resource
+# * shutdown: Powers down instances and marks them as non-bootable,
+# which can be later used for debugging/analysis
+# * noop: Takes no action
+#
+# Related options:
+#
+# * running_deleted_instance_poll_interval
+# * running_deleted_instance_timeout
+# (string value)
+# Possible values:
+# noop - <No description provided>
+# log - <No description provided>
+# shutdown - <No description provided>
+# reap - <No description provided>
+#running_deleted_instance_action = reap
+
+#
+# Time interval in seconds to wait between runs of the cleanup action.
+# If set to 0, the above check will be disabled. If
+# "running_deleted_instance_action" is set to "log" or "reap", a value
+# greater than 0 must be set.
+#
+# Possible values:
+#
+# * Any positive integer in seconds enables the option.
+# * 0: Disables the option.
+# * 1800: Default value.
+#
+# Related options:
+#
+# * running_deleted_instance_action
+# (integer value)
+#running_deleted_instance_poll_interval = 1800
+
+#
+# Time interval in seconds to wait for the instances that have
+# been marked as deleted in database to be eligible for cleanup.
+#
+# Possible values:
+#
+# * Any positive integer in seconds (default is 0).
+#
+# Related options:
+#
+# * "running_deleted_instance_action"
+# (integer value)
+#running_deleted_instance_timeout = 0
+
+#
+# The number of times to attempt to reap an instance's files.
+#
+# This option specifies the maximum number of retry attempts
+# that can be made.
+#
+# Possible values:
+#
+# * Any positive integer defines how many attempts are made.
+# * Any value <=0 means no delete attempts occur, but you should use
+# ``instance_delete_interval`` to disable the delete attempts.
+#
+# Related options:
+# * ``instance_delete_interval`` in the interval_opts group can be
+#   used to disable this option.
+# (integer value)
+#maximum_instance_delete_attempts = 5
+
+#
+# Sets the scope of the check for unique instance names.
+#
+# The default doesn't check for unique names. If a scope for the name
+# check is
+# set, a launch of a new instance or an update of an existing instance
+# with a
+# duplicate name will result in an ''InstanceExists'' error. The
+# uniqueness is
+# case-insensitive. Setting this option can increase the usability for
+# end
+# users as they don't have to distinguish among instances with the
+# same name
+# by their IDs.
+#
+# Possible values:
+#
+# * '': An empty value means that no uniqueness check is done and
+# duplicate
+# names are possible.
+# * "project": The instance name check is done only for instances
+# within the
+# same project.
+# * "global": The instance name check is done for all instances
+# regardless of
+# the project.
+# (string value)
+# Possible values:
+# '' - <No description provided>
+# project - <No description provided>
+# global - <No description provided>
+#osapi_compute_unique_server_name_scope =
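+# For example, "osapi_compute_unique_server_name_scope = project"
+# (an illustrative value) would reject a second instance with the same
+# name inside one project while still allowing it across projects.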
+
+#
+# Enable new nova-compute services on this host automatically.
+#
+# When a new nova-compute service starts up, it gets
+# registered in the database as an enabled service. Sometimes it can
+# be useful
+# to register new compute services in a disabled state and then enable
+# them at a
+# later point in time. This option only sets this behavior for nova-
+# compute
+# services, it does not auto-disable other services like nova-
+# conductor,
+# nova-scheduler, nova-consoleauth, or nova-osapi_compute.
+#
+# Possible values:
+#
+# * ``True``: Each new compute service is enabled as soon as it
+# registers itself.
+# * ``False``: Compute services must be enabled via an os-services
+# REST API call
+# or with the CLI with ``nova service-enable <hostname> <binary>``,
+# otherwise
+# they are not ready to use.
+# (boolean value)
+#enable_new_services = true
+
+#
+# Template string to be used to generate instance names.
+#
+# This template controls the creation of the database name of an
+# instance. This
+# is *not* the display name you enter when creating an instance (via
+# Horizon
+# or CLI). For a new deployment it is advisable to change the default
+# value
+# (which uses the database autoincrement) to another value which makes
+# use
+# of the attributes of an instance, like ``instance-%(uuid)s``. If you
+# already have instances in your deployment when you change this, your
+# deployment will break.
+#
+# Possible values:
+#
+# * A string which either uses the instance database ID (like the
+# default)
+# * A string with a list of named database columns, for example
+# ``%(id)d``
+# or ``%(uuid)s`` or ``%(hostname)s``.
+#
+# Related options:
+#
+# * not to be confused with: ``multi_instance_display_name_template``
+# (string value)
+#instance_name_template = instance-%08x
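+# For example, "instance_name_template = instance-%(uuid)s" (an
+# illustrative value) would name instances after their UUID rather
+# than the database autoincrement ID, as suggested above for new
+# deployments.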
+
+#
+# Number of times to retry live-migration before failing.
+#
+# Possible values:
+#
+# * If == -1, try until out of hosts (default)
+# * If == 0, only try once, no retries
+# * Integer greater than 0
+# (integer value)
+# Minimum value: -1
+#migrate_max_retries = -1
+
+#
+# Configuration drive format
+#
+# Configuration drive format that will contain metadata attached to
+# the
+# instance when it boots.
+#
+# Possible values:
+#
+# * iso9660: A file system image standard that is widely supported
+# across
+# operating systems. NOTE: Mind the libvirt bug
+# (https://bugs.launchpad.net/nova/+bug/1246201) - If your
+# hypervisor
+# driver is libvirt, and you want live migration to work without
+# shared storage,
+# then use VFAT.
+# * vfat: For legacy reasons, you can configure the configuration
+# drive to
+# use VFAT format instead of ISO 9660.
+#
+# Related options:
+#
+# * This option is meaningful when one of the following alternatives
+# occur:
+# 1. force_config_drive option set to 'true'
+# 2. the REST API call to create the instance contains an enable
+# flag for
+# config drive option
+# 3. the image used to create the instance requires a config drive,
+# this is defined by img_config_drive property for that image.
+# * A compute node running Hyper-V hypervisor can be configured to
+# attach
+# configuration drive as a CD drive. To attach the configuration
+# drive as a CD
+# drive, set config_drive_cdrom option at hyperv section, to true.
+# (string value)
+# Possible values:
+# iso9660 - <No description provided>
+# vfat - <No description provided>
+#config_drive_format = iso9660
+config_drive_format={{ compute.get('config_drive_format', compute.get('config_drive', {}).get('format', 'vfat')) }}
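+# A minimal pillar sketch (illustrative values, assuming the usual
+# nova:compute pillar layout used by this formula):
+#
+#   nova:
+#     compute:
+#       config_drive:
+#         format: iso9660
+#
+# would render the line above as "config_drive_format=iso9660"; with
+# no pillar value the template falls back to 'vfat'.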
+
+#
+# Force injection to take place on a config drive
+#
+# When this option is set to true, configuration drive functionality
+# will be
+# forcibly enabled by default; otherwise the user can still enable
+# configuration
+# drives via the REST API or image metadata properties.
+#
+# Possible values:
+#
+# * True: Force the use of a configuration drive regardless of the
+# user's input in the
+# REST API call.
+# * False: Do not force use of configuration drive. Config drives can
+# still be
+# enabled via the REST API or image metadata properties.
+#
+# Related options:
+#
+# * Use the 'mkisofs_cmd' flag to set the path where you install the
+# genisoimage program. If genisoimage is in the same path as the
+# nova-compute service, you do not need to set this flag.
+# * To use configuration drive with Hyper-V, you must set the
+# 'mkisofs_cmd' value to the full path to an mkisofs.exe
+# installation.
+# Additionally, you must set the qemu_img_cmd value in the hyperv
+# configuration section to the full path to a qemu-img command
+# installation.
+# (boolean value)
+#force_config_drive = false
+force_config_drive={{ compute.get('config_drive', {}).get('forced', True)|lower }}
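+# A minimal pillar sketch (illustrative, same assumed nova:compute
+# layout):
+#
+#   nova:
+#     compute:
+#       config_drive:
+#         forced: false
+#
+# would render "force_config_drive=false"; when unset, the template
+# defaults to true.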
+
+#
+# Name or path of the tool used for ISO image creation
+#
+# Use the mkisofs_cmd flag to set the path where you install the
+# genisoimage
+# program. If genisoimage is on the system path, you do not need to
+# change
+# the default value.
+#
+# To use configuration drive with Hyper-V, you must set the
+# mkisofs_cmd value
+# to the full path to an mkisofs.exe installation. Additionally, you
+# must set
+# the qemu_img_cmd value in the hyperv configuration section to the
+# full path
+# to a qemu-img command installation.
+#
+# Possible values:
+#
+# * Name of the ISO image creator program, in case it is in the same
+# directory
+# as the nova-compute service
+# * Path to ISO image creator program
+#
+# Related options:
+#
+# * This option is meaningful when config drives are enabled.
+# * To use configuration drive with Hyper-V, you must set the
+# qemu_img_cmd
+# value in the hyperv configuration section to the full path to a
+# qemu-img
+# command installation.
+# (string value)
+#mkisofs_cmd = genisoimage
+
+# DEPRECATED: The driver to use for database access (string value)
+# This option is deprecated for removal since 13.0.0.
+# Its value may be silently ignored in the future.
+#db_driver = nova.db
+
+# DEPRECATED:
+# Default flavor to use for the EC2 API only.
+# The Nova API does not support a default flavor.
+# (string value)
+# This option is deprecated for removal since 14.0.0.
+# Its value may be silently ignored in the future.
+# Reason: The EC2 API is deprecated.
+#default_flavor = m1.small
+
+#
+# The IP address which the host is using to connect to the management
+# network.
+#
+# Possible values:
+#
+# * String with valid IP address. Default is IPv4 address of this
+# host.
+#
+# Related options:
+#
+# * metadata_host
+# * my_block_storage_ip
+# * routing_source_ip
+# * vpn_ip
+# (string value)
+#my_ip = <host_ipv4>
+{%- if compute.my_ip is defined %}
+my_ip={{ compute.my_ip }}
+{%- endif %}
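+# A minimal pillar sketch (illustrative address, same assumed
+# nova:compute layout):
+#
+#   nova:
+#     compute:
+#       my_ip: 10.10.10.10
+#
+# would render "my_ip=10.10.10.10"; if my_ip is absent from the
+# pillar, the option stays at its built-in default.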
+
+#
+# The IP address which is used to connect to the block storage
+# network.
+#
+# Possible values:
+#
+# * String with valid IP address. Default is IP address of this host.
+#
+# Related options:
+#
+# * my_ip - if my_block_storage_ip is not set, then my_ip value is
+# used.
+# (string value)
+#my_block_storage_ip = $my_ip
+
+#
+# Hostname, FQDN or IP address of this host.
+#
+# Used as:
+#
+# * the oslo.messaging queue name for nova-compute worker
+# * we use this value for the binding_host sent to neutron. This means
+# if you use
+# a neutron agent, it should have the same value for host.
+# * cinder host attachment information
+#
+# Must be valid within an AMQP key.
+#
+# Possible values:
+#
+# * String with hostname, FQDN or IP address. Default is hostname of
+# this host.
+# (string value)
+#host = <current_hostname>
+{%- if compute.host is defined %}
+host={{ compute.host }}
+{%- endif %}
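+# A minimal pillar sketch (hostname is illustrative):
+#
+#   nova:
+#     compute:
+#       host: cmp01.example.local
+#
+# would render "host=cmp01.example.local"; a matching value should be
+# used by the neutron agent on the same node, as noted above.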
+
+# DEPRECATED:
+# This option is a list of full paths to one or more configuration
+# files for
+# dhcpbridge. In most cases the default path of '/etc/nova/nova-
+# dhcpbridge.conf'
+# should be sufficient, but if you have special needs for configuring
+# dhcpbridge,
+# you can change or add to this list.
+#
+# Possible values
+#
+# * A list of strings, where each string is the full path to a
+# dhcpbridge
+# configuration file.
+# (multi valued)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#dhcpbridge_flagfile = /etc/nova/nova.conf
+
+# DEPRECATED:
+# The location where the network configuration files will be kept. The
+# default is
+# the 'networks' directory off of the location where nova's Python
+# module is
+# installed.
+#
+# Possible values
+#
+# * A string containing the full path to the desired configuration
+# directory
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#networks_path = $state_path/networks
+
+# DEPRECATED:
+# This is the name of the network interface for public IP addresses.
+# The default
+# is 'eth0'.
+#
+# Possible values:
+#
+# * Any string representing a network interface name
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#public_interface = eth0
+
+# DEPRECATED:
+# The location of the binary nova-dhcpbridge. By default it is the
+# binary named
+# 'nova-dhcpbridge' that is installed with all the other nova
+# binaries.
+#
+# Possible values:
+#
+# * Any string representing the full path to the binary for dhcpbridge
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#dhcpbridge = $bindir/nova-dhcpbridge
+
+# DEPRECATED:
+# The public IP address of the network host.
+#
+# This is used when creating an SNAT rule.
+#
+# Possible values:
+#
+# * Any valid IP address
+#
+# Related options:
+#
+# * ``force_snat_range``
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#routing_source_ip = $my_ip
+
+# DEPRECATED:
+# The lifetime of a DHCP lease, in seconds. The default is 86400 (one
+# day).
+#
+# Possible values:
+#
+# * Any positive integer value.
+# (integer value)
+# Minimum value: 1
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#dhcp_lease_time = 86400
+
+# DEPRECATED:
+# Despite the singular form of the name of this option, it is actually
+# a list of
+# zero or more server addresses that dnsmasq will use for DNS
+# nameservers. If
+# this is not empty, dnsmasq will not read /etc/resolv.conf, but will
+# only use
+# the servers specified in this option. If the option
+# use_network_dns_servers is
+# True, the dns1 and dns2 servers from the network will be appended to
+# this list,
+# and will be used as DNS servers, too.
+#
+# Possible values:
+#
+# * A list of strings, where each string is either an IP address or a
+# FQDN.
+#
+# Related options:
+#
+# * ``use_network_dns_servers``
+# (multi valued)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#dns_server =
+
+# DEPRECATED:
+# When this option is set to True, the dns1 and dns2 servers for the
+# network
+# specified by the user on boot will be used for DNS, as well as any
+# specified in
+# the `dns_server` option.
+#
+# Related options:
+#
+# * ``dns_server``
+# (boolean value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#use_network_dns_servers = false
+
+# DEPRECATED:
+# This option is a list of zero or more IP address ranges in your
+# network's DMZ
+# that should be accepted.
+#
+# Possible values:
+#
+# * A list of strings, each of which should be a valid CIDR.
+# (list value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#dmz_cidr =
+
+# DEPRECATED:
+# This is a list of zero or more IP ranges that traffic from the
+# `routing_source_ip` will be SNATted to. If the list is empty, then
+# no SNAT
+# rules are created.
+#
+# Possible values:
+#
+# * A list of strings, each of which should be a valid CIDR.
+#
+# Related options:
+#
+# * ``routing_source_ip``
+# (multi valued)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#force_snat_range =
+
+# DEPRECATED:
+# The path to the custom dnsmasq configuration file, if any.
+#
+# Possible values:
+#
+# * The full path to the configuration file, or an empty string if
+# there is no
+# custom dnsmasq configuration file.
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#dnsmasq_config_file =
+
+# DEPRECATED:
+# This is the class used as the ethernet device driver for linuxnet
+# bridge
+# operations. The default value should be all you need for most cases,
+# but if you
+# wish to use a customized class, set this option to the full dot-
+# separated
+# import path for that class.
+#
+# Possible values:
+#
+# * Any string representing a dot-separated class path that Nova can
+# import.
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#linuxnet_interface_driver = nova.network.linux_net.LinuxBridgeInterfaceDriver
+
+# DEPRECATED:
+# The name of the Open vSwitch bridge that is used with linuxnet when
+# connecting
+# with Open vSwitch.
+#
+# Possible values:
+#
+# * Any string representing a valid bridge name.
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#linuxnet_ovs_integration_bridge = br-int
+
+#
+# When True, when a device starts up, and upon binding floating IP
+# addresses, arp
+# messages will be sent to ensure that the arp caches on the compute
+# hosts are
+# up-to-date.
+#
+# Related options:
+#
+# * ``send_arp_for_ha_count``
+# (boolean value)
+#send_arp_for_ha = false
+
+#
+# When arp messages are configured to be sent, they will be sent with
+# the count
+# set to the value of this option. Of course, if this is set to zero,
+# no arp
+# messages will be sent.
+#
+# Possible values:
+#
+# * Any integer greater than or equal to 0
+#
+# Related options:
+#
+# * ``send_arp_for_ha``
+# (integer value)
+#send_arp_for_ha_count = 3
+
+# DEPRECATED:
+# When set to True, only the first NIC of a VM will get its default
+# gateway from
+# the DHCP server.
+# (boolean value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#use_single_default_gateway = false
+
+# DEPRECATED:
+# One or more interfaces that bridges can forward traffic to. If any
+# of the items
+# in this list is the special keyword 'all', then all traffic will be
+# forwarded.
+#
+# Possible values:
+#
+# * A list of zero or more interface names, or the word 'all'.
+# (multi valued)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#forward_bridge_interface = all
+
+#
+# This option determines the IP address for the network metadata API
+# server.
+#
+# This is really the client side of the metadata host equation that
+# allows
+# nova-network to find the metadata server when using default
+# multi-host
+# networking.
+#
+# Possible values:
+#
+# * Any valid IP address. The default is the address of the Nova API
+# server.
+#
+# Related options:
+#
+# * ``metadata_port``
+# (string value)
+#metadata_host = $my_ip
+
+# DEPRECATED:
+# This option determines the port used for the metadata API server.
+#
+# Related options:
+#
+# * ``metadata_host``
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#metadata_port = 8775
+
+# DEPRECATED:
+# This expression, if defined, will select any matching iptables rules
+# and place
+# them at the top when applying metadata changes to the rules.
+#
+# Possible values:
+#
+# * Any string representing a valid regular expression, or an empty
+# string
+#
+# Related options:
+#
+# * ``iptables_bottom_regex``
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#iptables_top_regex =
+
+# DEPRECATED:
+# This expression, if defined, will select any matching iptables rules
+# and place
+# them at the bottom when applying metadata changes to the rules.
+#
+# Possible values:
+#
+# * Any string representing a valid regular expression, or an empty
+# string
+#
+# Related options:
+#
+# * iptables_top_regex
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#iptables_bottom_regex =
+
+# DEPRECATED:
+# By default, packets that do not pass the firewall are DROPped. In
+# many cases,
+# though, an operator may find it more useful to change this from DROP
+# to REJECT,
+# so that the user issuing those packets may have a better idea as to
+# what's
+# going on, or LOGDROP in order to record the blocked traffic before
+# DROPping.
+#
+# Possible values:
+#
+# * A string representing an iptables chain. The default is DROP.
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#iptables_drop_action = DROP
+
+# DEPRECATED:
+# This option represents the period of time, in seconds, that the
+# ovs_vsctl calls
+# will wait for a response from the database before timing out. A
+# setting of 0
+# means that the utility should wait forever for a response.
+#
+# Possible values:
+#
+# * Any positive integer if a limited timeout is desired, or zero if
+# the calls
+# should wait forever for a response.
+# (integer value)
+# Minimum value: 0
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ovs_vsctl_timeout = 120
+
+# DEPRECATED:
+# This option is used mainly in testing to avoid calls to the
+# underlying network
+# utilities.
+# (boolean value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#fake_network = false
+
+# DEPRECATED:
+# This option determines the number of times to retry ebtables
+# commands before
+# giving up. The minimum number of retries is 1.
+#
+# Possible values:
+#
+# * Any positive integer
+#
+# Related options:
+#
+# * ``ebtables_retry_interval``
+# (integer value)
+# Minimum value: 1
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ebtables_exec_attempts = 3
+
+# DEPRECATED:
+# This option determines the time, in seconds, that the system will
+# sleep in
+# between ebtables retries. Note that each successive retry waits a
+# multiple of
+# this value, so for example, if this is set to the default of 1.0
+# seconds, and
+# ebtables_exec_attempts is 4, after the first failure, the system
+# will sleep for
+# 1 * 1.0 seconds, after the second failure it will sleep 2 * 1.0
+# seconds, and
+# after the third failure it will sleep 3 * 1.0 seconds.
+#
+# Possible values:
+#
+# * Any non-negative float or integer. Setting this to zero will
+# result in no
+# waiting between attempts.
+#
+# Related options:
+#
+# * ebtables_exec_attempts
+# (floating point value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ebtables_retry_interval = 1.0
+
+# DEPRECATED:
+# Enable neutron as the backend for networking.
+#
+# Determine whether to use Neutron or Nova Network as the back end.
+# Set to true
+# to use neutron.
+# (boolean value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#use_neutron = true
+
+#
+# This option determines whether the network setup information is
+# injected into
+# the VM before it is booted. While it was originally designed to be
+# used only
+# by nova-network, it is also used by the vmware and xenapi virt
+# drivers to
+# control whether network information is injected into a VM. The
+# libvirt virt
+# driver also uses it when config_drive is used to configure the
+# network, to control
+# whether network information is injected into a VM.
+# (boolean value)
+#flat_injected = false
+
+# DEPRECATED:
+# This option determines the bridge used for simple network interfaces
+# when no
+# bridge is specified in the VM creation request.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment.
+#
+# Possible values:
+#
+# * Any string representing a valid network bridge, such as 'br100'
+#
+# Related options:
+#
+# * ``use_neutron``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#flat_network_bridge = <None>
+
+# DEPRECATED:
+# This is the address of the DNS server for a simple network. If this
+# option is
+# not specified, the default of '8.8.4.4' is used.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment.
+#
+# Possible values:
+#
+# * Any valid IP address.
+#
+# Related options:
+#
+# * ``use_neutron``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#flat_network_dns = 8.8.4.4
+
+# DEPRECATED:
+# This option is the name of the virtual interface of the VM on which
+# the bridge
+# will be built. While it was originally designed to be used only by
+# nova-network, it is also used by libvirt for the bridge interface
+# name.
+#
+# Possible values:
+#
+# * Any valid virtual interface name, such as 'eth0'
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#flat_interface = <None>
+
+# DEPRECATED:
+# This is the VLAN number used for private networks. Note that
+# when creating
+# the networks, if the specified number has already been assigned,
+# nova-network
+# will increment this number until it finds an available VLAN.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment. It also will be ignored if the
+# configuration option
+# for `network_manager` is not set to the default of
+# 'nova.network.manager.VlanManager'.
+#
+# Possible values:
+#
+# * Any integer between 1 and 4094. Values outside of that range will
+# raise a
+# ValueError exception.
+#
+# Related options:
+#
+# * ``network_manager``
+# * ``use_neutron``
+# (integer value)
+# Minimum value: 1
+# Maximum value: 4094
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#vlan_start = 100
+
+# DEPRECATED:
+# This option is the name of the virtual interface of the VM on which
+# the VLAN
+# bridge will be built. While it was originally designed to be used
+# only by
+# nova-network, it is also used by libvirt and xenapi for the bridge
+# interface
+# name.
+#
+# Please note that this setting will be ignored in nova-network if the
+# configuration option for `network_manager` is not set to the default
+# of
+# 'nova.network.manager.VlanManager'.
+#
+# Possible values:
+#
+# * Any valid virtual interface name, such as 'eth0'
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options. While
+# this option has an effect when using neutron, it incorrectly
+# overrides the value
+# provided by neutron and should therefore not be used.
+#vlan_interface = <None>
+
+# DEPRECATED:
+# This option represents the number of networks to create if not
+# explicitly
+# specified when the network is created. The only time this is used is
+# if a CIDR
+# is specified, but an explicit network_size is not. In that case, the
+# subnets
+# are created by dividing the IP address space of the CIDR by
+# num_networks. The
+# resulting subnet sizes cannot be larger than the configuration
+# option
+# `network_size`; in that event, they are reduced to `network_size`,
+# and a
+# warning is logged.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment.
+#
+# Possible values:
+#
+# * Any positive integer is technically valid, although there are
+# practical
+# limits based upon available IP address space and virtual
+# interfaces.
+#
+# Related options:
+#
+# * ``use_neutron``
+# * ``network_size``
+# (integer value)
+# Minimum value: 1
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#num_networks = 1
+
+# DEPRECATED:
+# This option is no longer used since the /os-cloudpipe API was
+# removed in the
+# 16.0.0 Pike release. This is the public IP address for the cloudpipe
+# VPN
+# servers. It defaults to the IP address of the host.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment. It also will be ignored if the
+# configuration option
+# for `network_manager` is not set to the default of
+# 'nova.network.manager.VlanManager'.
+#
+# Possible values:
+#
+# * Any valid IP address. The default is ``$my_ip``, the IP address of
+# the host.
+#
+# Related options:
+#
+# * ``network_manager``
+# * ``use_neutron``
+# * ``vpn_start``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#vpn_ip = $my_ip
+
+# DEPRECATED:
+# This is the port number to use as the first VPN port for private
+# networks.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment. It also will be ignored if the
+# configuration option
+# for `network_manager` is not set to the default of
+# 'nova.network.manager.VlanManager', or if you specify a value for the
+# 'vpn_start'
+# parameter when creating a network.
+#
+# Possible values:
+#
+# * Any integer representing a valid port number. The default is 1000.
+#
+# Related options:
+#
+# * ``use_neutron``
+# * ``vpn_ip``
+# * ``network_manager``
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#vpn_start = 1000
+
+# DEPRECATED:
+# This option determines the number of addresses in each private
+# subnet.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment.
+#
+# Possible values:
+#
+# * Any positive integer that is less than or equal to the available
+# network
+# size. Note that if you are creating multiple networks, they must
+# all fit in
+# the available IP address space. The default is 256.
+#
+# Related options:
+#
+# * ``use_neutron``
+# * ``num_networks``
+# (integer value)
+# Minimum value: 1
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#network_size = 256
+
+# DEPRECATED:
+# This option determines the fixed IPv6 address block when creating a
+# network.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment.
+#
+# Possible values:
+#
+# * Any valid IPv6 CIDR
+#
+# Related options:
+#
+# * ``use_neutron``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#fixed_range_v6 = fd00::/48
+
+# DEPRECATED:
+# This is the default IPv4 gateway. It is used only in the testing
+# suite.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment.
+#
+# Possible values:
+#
+# * Any valid IP address.
+#
+# Related options:
+#
+# * ``use_neutron``
+# * ``gateway_v6``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#gateway = <None>
+
+# DEPRECATED:
+# This is the default IPv6 gateway. It is used only in the testing
+# suite.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment.
+#
+# Possible values:
+#
+# * Any valid IP address.
+#
+# Related options:
+#
+# * ``use_neutron``
+# * ``gateway``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#gateway_v6 = <None>
+
+# DEPRECATED:
+# This option represents the number of IP addresses to reserve at the
+# top of the
+# address range for VPN clients. It also will be ignored if the
+# configuration
+# option for `network_manager` is not set to the default of
+# 'nova.network.manager.VlanManager'.
+#
+# Possible values:
+#
+# * Any integer, 0 or greater.
+#
+# Related options:
+#
+# * ``use_neutron``
+# * ``network_manager``
+# (integer value)
+# Minimum value: 0
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#cnt_vpn_clients = 0
+
+# DEPRECATED:
+# This is the number of seconds to wait before disassociating a
+# deallocated fixed
+# IP address. This is only used with the nova-network service, and has
+# no effect
+# when using neutron for networking.
+#
+# Possible values:
+#
+# * Any integer, zero or greater.
+#
+# Related options:
+#
+# * ``use_neutron``
+# (integer value)
+# Minimum value: 0
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#fixed_ip_disassociate_timeout = 600
+
+# DEPRECATED:
+# This option determines how many times nova-network will attempt to
+# create a
+# unique MAC address before giving up and raising a
+# `VirtualInterfaceMacAddressException` error.
+#
+# Possible values:
+#
+# * Any positive integer. The default is 5.
+#
+# Related options:
+#
+# * ``use_neutron``
+# (integer value)
+# Minimum value: 1
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#create_unique_mac_address_attempts = 5
+
+# DEPRECATED:
+# Determines whether unused gateway devices, both VLAN and bridge, are
+# deleted if
+# the network is in nova-network VLAN mode and is multi-hosted.
+#
+# Related options:
+#
+# * ``use_neutron``
+# * ``vpn_ip``
+# * ``fake_network``
+# (boolean value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#teardown_unused_network_gateway = false
+
+# DEPRECATED:
+# When this option is True, a call is made to release the DHCP for the
+# instance
+# when that instance is terminated.
+#
+# Related options:
+#
+# * ``use_neutron``
+# (boolean value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+force_dhcp_release = {{ compute.get('force_dhcp_release', 'true') }}
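+# A minimal pillar sketch (illustrative):
+#
+#   nova:
+#     compute:
+#       force_dhcp_release: false
+#
+# would render "force_dhcp_release = false"; when unset, the template
+# falls back to 'true'.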
+
+# DEPRECATED:
+# When this option is True, whenever a DNS entry must be updated, a
+# fanout cast
+# message is sent to all network hosts to update their DNS entries in
+# multi-host
+# mode.
+#
+# Related options:
+#
+# * ``use_neutron``
+# (boolean value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#update_dns_entries = false
+
+# DEPRECATED:
+# This option determines the time, in seconds, to wait between
+# refreshing DNS
+# entries for the network.
+#
+# Possible values:
+#
+# * A positive integer
+# * -1 to disable updates
+#
+# Related options:
+#
+# * ``use_neutron``
+# (integer value)
+# Minimum value: -1
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#dns_update_periodic_interval = -1
+
+# DEPRECATED:
+# This option allows you to specify the domain for the DHCP server.
+#
+# Possible values:
+#
+# * Any string that is a valid domain name.
+#
+# Related options:
+#
+# * ``use_neutron``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#dhcp_domain = novalocal
+dhcp_domain={{ compute.get('dhcp_domain', 'novalocal') }}
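+# A minimal pillar sketch (domain is illustrative):
+#
+#   nova:
+#     compute:
+#       dhcp_domain: cloud.local
+#
+# would render "dhcp_domain=cloud.local"; when unset, the template
+# falls back to 'novalocal'.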
+
+# DEPRECATED:
+# This option allows you to specify the L3 management library to be
+# used.
+#
+# Possible values:
+#
+# * Any dot-separated string that represents the import path to an L3
+# networking
+# library.
+#
+# Related options:
+#
+# * ``use_neutron``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#l3_lib = nova.network.l3.LinuxNetL3
+
+# DEPRECATED:
+# THIS VALUE SHOULD BE SET WHEN CREATING THE NETWORK.
+#
+# If True in multi_host mode, all compute hosts share the same dhcp
+# address. The
+# same IP address used for DHCP will be added on each nova-network
+# node which is
+# only visible to the VMs on the same host.
+#
+# The use of this configuration has been deprecated and may be removed
+# in any
+# release after Mitaka. It is recommended that instead of relying on
+# this option,
+# an explicit value should be passed to 'create_networks()' as a
+# keyword argument
+# with the name 'share_address'.
+# (boolean value)
+# This option is deprecated for removal since 2014.2.
+# Its value may be silently ignored in the future.
+#share_dhcp_address = false
+
+# DEPRECATED:
+# URL for LDAP server which will store DNS entries
+#
+# Possible values:
+#
+# * A valid LDAP URL representing the server
+# (uri value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_url = ldap://ldap.example.com:389
+
+# DEPRECATED: Bind user for LDAP server (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_user = uid=admin,ou=people,dc=example,dc=org
+
+# DEPRECATED: Bind user's password for LDAP server (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_password = password
+
+# DEPRECATED:
+# Hostmaster for LDAP DNS driver Statement of Authority
+#
+# Possible values:
+#
+# * Any valid string representing LDAP DNS hostmaster.
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_soa_hostmaster = hostmaster@example.org
+
+# DEPRECATED:
+# DNS Servers for LDAP DNS driver
+#
+# Possible values:
+#
+# * A valid URL representing a DNS server
+# (multi valued)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_servers = dns.example.org
+
+# DEPRECATED:
+# Base distinguished name for the LDAP search query
+#
+# This option helps to decide where to look up the host in LDAP.
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_base_dn = ou=hosts,dc=example,dc=org
+
+# DEPRECATED:
+# Refresh interval (in seconds) for LDAP DNS driver Start of Authority
+#
+# Time interval a secondary/slave DNS server waits before requesting
+# the
+# primary DNS server's current SOA record. If the records are
+# different,
+# the secondary DNS server will request a zone transfer from the
+# primary.
+#
+# NOTE: Lower values would cause more traffic.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_soa_refresh = 1800
+
+# DEPRECATED:
+# Retry interval (in seconds) for LDAP DNS driver Start of Authority
+#
+# Time interval a secondary/slave DNS server should wait if an
+# attempt to transfer the zone failed during the previous refresh
+# interval.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_soa_retry = 3600
+
+# DEPRECATED:
+# Expiry interval (in seconds) for LDAP DNS driver Start of Authority
+#
+# Time interval a secondary/slave DNS server holds the information
+# before it is no longer considered authoritative.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_soa_expiry = 86400
+
+# DEPRECATED:
+# Minimum interval (in seconds) for LDAP DNS driver Start of Authority
+#
+# The minimum time-to-live that applies to all resource records in the
+# zone file. This value tells other servers how long they
+# should keep the data in cache.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_soa_minimum = 7200
+
+# DEPRECATED:
+# Default value for multi_host in networks.
+#
+# nova-network service can operate in a multi-host or single-host
+# mode.
+# In multi-host mode each compute node runs a copy of nova-network and
+# the
+# instances on that compute node use the compute node as a gateway to
+# the
+# Internet. Whereas in single-host mode, a central server runs the
+# nova-network
+# service. All compute nodes forward traffic from the instances to the
+# cloud controller which then forwards traffic to the Internet.
+#
+# If this option is set to true, some RPC network calls will be sent
+# directly
+# to the host.
+#
+# Note that this option is only used when using nova-network instead
+# of
+# Neutron in your deployment.
+#
+# Related options:
+#
+# * ``use_neutron``
+# (boolean value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#multi_host = false
+
+# DEPRECATED:
+# Driver to use for network creation.
+#
+# Network driver initializes (creates bridges and so on) only when the
+# first VM lands on a host node. All network managers configure the
+# network using network drivers. The driver is not tied to any
+# particular
+# network manager.
+#
+# The default Linux driver implements vlans, bridges, and iptables
+# rules
+# using linux utilities.
+#
+# Note that this option is only used when using nova-network instead
+# of Neutron in your deployment.
+#
+# Related options:
+#
+# * ``use_neutron``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#network_driver = nova.network.linux_net
+
+# DEPRECATED:
+# Firewall driver to use with ``nova-network`` service.
+#
+# This option only applies when using the ``nova-network`` service.
+# When using
+# another networking service, such as Neutron, this should be set
+# to
+# ``nova.virt.firewall.NoopFirewallDriver``.
+#
+# Possible values:
+#
+# * ``nova.virt.firewall.IptablesFirewallDriver``
+# * ``nova.virt.firewall.NoopFirewallDriver``
+# * ``nova.virt.libvirt.firewall.IptablesFirewallDriver``
+# * [...]
+#
+# Related options:
+#
+# * ``use_neutron``: This must be set to ``False`` to enable ``nova-
+# network``
+# networking
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+
+# DEPRECATED:
+# Determine whether to allow network traffic from same network.
+#
+# When set to true, hosts on the same subnet are not filtered and are
+# allowed
+# to pass all types of traffic between them. On a flat network, this
+# allows
+# all instances from all projects unfiltered communication. With VLAN
+# networking, this allows access between instances within the same
+# project.
+#
+# This option only applies when using the ``nova-network`` service.
+# When using
+# another networking service, such as Neutron, security groups or
+# other
+# approaches should be used.
+#
+# Possible values:
+#
+# * True: Network traffic should be allowed to pass between all
+# instances on the
+# same network, regardless of their tenant and security policies
+# * False: Network traffic should not be allowed to pass between
+# instances unless
+# it is unblocked in a security group
+#
+# Related options:
+#
+# * ``use_neutron``: This must be set to ``False`` to enable ``nova-
+# network``
+# networking
+# * ``firewall_driver``: This must be set to
+# ``nova.virt.libvirt.firewall.IptablesFirewallDriver`` to ensure
+# the
+# libvirt firewall driver is enabled.
+# (boolean value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#allow_same_net_traffic = true
+
+# DEPRECATED:
+# Default pool for floating IPs.
+#
+# This option specifies the default floating IP pool for allocating
+# floating IPs.
+#
+# While allocating a floating IP, users can optionally pass in the
+# name of the
+# pool they want to allocate from, otherwise it will be pulled from
+# the
+# default pool.
+#
+# If this option is not set, then 'nova' is used as default floating
+# pool.
+#
+# Possible values:
+#
+# * Any string representing a floating IP pool name
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# This option was used for two purposes: to set the floating IP pool
+# name for
+# nova-network and to do the same for neutron. nova-network is
+# deprecated, as are
+# any related configuration options. Users of neutron, meanwhile,
+# should use the
+# 'default_floating_pool' option in the '[neutron]' group.
+#default_floating_pool = nova
+
+# DEPRECATED:
+# Autoassigning floating IP to VM
+#
+# When set to True, floating IP is auto allocated and associated
+# to the VM upon creation.
+#
+# Related options:
+#
+# * use_neutron: this option only works with nova-network.
+# (boolean value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#auto_assign_floating_ip = false
+
+# DEPRECATED:
+# Full class name for the DNS Manager for floating IPs.
+#
+# This option specifies the class of the driver that provides
+# functionality
+# to manage DNS entries associated with floating IPs.
+#
+# When a user adds a DNS entry for a specified domain to a floating
+# IP,
+# nova will add a DNS entry using the specified floating DNS driver.
+# When a floating IP is deallocated, its DNS entry will automatically
+# be deleted.
+#
+# Possible values:
+#
+# * Full Python path to the class to be used
+#
+# Related options:
+#
+# * use_neutron: this option only works with nova-network.
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#floating_ip_dns_manager = nova.network.noop_dns_driver.NoopDNSDriver
+
+# DEPRECATED:
+# Full class name for the DNS Manager for instance IPs.
+#
+# This option specifies the class of the driver that provides
+# functionality
+# to manage DNS entries for instances.
+#
+# On instance creation, nova will add DNS entries for the instance
+# name and
+# id, using the specified instance DNS driver and domain. On instance
+# deletion,
+# nova will remove the DNS entries.
+#
+# Possible values:
+#
+# * Full Python path to the class to be used
+#
+# Related options:
+#
+# * use_neutron: this option only works with nova-network.
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#instance_dns_manager = nova.network.noop_dns_driver.NoopDNSDriver
+
+# DEPRECATED:
+# If specified, Nova checks if the availability_zone of every instance
+# matches
+# what the database says the availability_zone should be for the
+# specified
+# dns_domain.
+#
+# Related options:
+#
+# * use_neutron: this option only works with nova-network.
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#instance_dns_domain =
+
+# DEPRECATED:
+# Assign IPv6 and IPv4 addresses when creating instances.
+#
+# Related options:
+#
+# * use_neutron: this only works with nova-network.
+# (boolean value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#use_ipv6 = false
+
+# DEPRECATED:
+# Abstracts out IPv6 address generation to pluggable backends.
+#
+# nova-network can be put into dual-stack mode, so that it uses
+# both IPv4 and IPv6 addresses. In dual-stack mode, by default,
+# instances
+# acquire IPv6 global unicast addresses with the help of stateless
+# address
+# auto-configuration mechanism.
+#
+# Related options:
+#
+# * use_neutron: this option only works with nova-network.
+# * use_ipv6: this option only works if ipv6 is enabled for nova-
+# network.
+# (string value)
+# Possible values:
+# rfc2462 - <No description provided>
+# account_identifier - <No description provided>
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ipv6_backend = rfc2462
+
+# DEPRECATED:
+# This option is used to enable or disable quota checking for tenant
+# networks.
+#
+# Related options:
+#
+# * quota_networks
+# (boolean value)
+# This option is deprecated for removal since 14.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# CRUD operations on tenant networks are only available when using
+# nova-network
+# and nova-network is itself deprecated.
+#enable_network_quota = false
+
+# DEPRECATED:
+# This option controls the number of private networks that can be
+# created per
+# project (or per tenant).
+#
+# Related options:
+#
+# * enable_network_quota
+# (integer value)
+# Minimum value: 0
+# This option is deprecated for removal since 14.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# CRUD operations on tenant networks are only available when using
+# nova-network
+# and nova-network is itself deprecated.
+#quota_networks = 3
+
+#
+# Filename that will be used for storing websocket frames received
+# and sent by a proxy service (like VNC, spice, serial) running on
+# this host.
+# If this is not set, no recording will be done.
+# (string value)
+#record = <None>
+
+# Run as a background process. (boolean value)
+#daemon = false
+
+# Disallow non-encrypted connections. (boolean value)
+#ssl_only = false
+
+# Set to True if source host is addressed with IPv6. (boolean value)
+#source_is_ipv6 = false
+
+# Path to SSL certificate file. (string value)
+#cert = self.pem
+
+# SSL key file (if separate from cert). (string value)
+#key = <None>
+
+#
+# Path to directory with content which will be served by a web server.
+# (string value)
+#web = /usr/share/spice-html5
+
+#
+# The directory where the Nova python modules are installed.
+#
+# This directory is used to store template files for networking and
+# remote
+# console access. It is also the default path for other config options
+# which
+# need to persist Nova internal data. It is very unlikely that you
+# need to
+# change this option from its default value.
+#
+# Possible values:
+#
+# * The full path to a directory.
+#
+# Related options:
+#
+# * ``state_path``
+# (string value)
+#pybasedir = /usr/lib/python2.7/dist-packages
+
+#
+# The directory where the Nova binaries are installed.
+#
+# This option is only relevant if the networking capabilities from
+# Nova are
+# used (see services below). Nova's networking capabilities are
+# targeted to
+# be fully replaced by Neutron in the future. It is very unlikely that
+# you need
+# to change this option from its default value.
+#
+# Possible values:
+#
+# * The full path to a directory.
+# (string value)
+#bindir = /usr/local/bin
+
+#
+# The top-level directory for maintaining Nova's state.
+#
+# This directory is used to store Nova's internal state. It is used by
+# a
+# variety of other config options which derive from this. In some
+# scenarios
+# (for example migrations) it makes sense to use a storage location
+# which is
+# shared between multiple compute hosts (for example via NFS). Unless
+# the
+# option ``instances_path`` gets overwritten, this directory can grow
+# very
+# large.
+#
+# Possible values:
+#
+# * The full path to a directory. Defaults to value provided in
+# ``pybasedir``.
+# (string value)
+state_path = /var/lib/nova
+
+#
+# Number of seconds indicating how frequently the state of services on
+# a
+# given hypervisor is reported. Nova needs to know this to determine
+# the
+# overall health of the deployment.
+#
+# Related Options:
+#
+# * service_down_time
+# report_interval should be less than service_down_time. If
+# service_down_time
+# is less than report_interval, services will routinely be
+# considered down,
+# because they report in too rarely.
+# (integer value)
+#report_interval = 10
+report_interval = {{ compute.get('report_interval', '60') }}
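+# A minimal pillar sketch (interval is illustrative):
+#
+#   nova:
+#     compute:
+#       report_interval: 30
+#
+# would render "report_interval = 30". Keep the rendered value below
+# service_down_time (90 in this file), otherwise services would
+# routinely be flagged as down.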
+
+#
+# Maximum time in seconds since last check-in for up service
+#
+# Each compute node periodically updates its database status based
+# on the
+# specified report interval. If the compute node hasn't updated the
+# status
+# for more than service_down_time, then the compute node is considered
+# down.
+#
+# Related Options:
+#
+# * report_interval (service_down_time should not be less than
+# report_interval)
+# (integer value)
+service_down_time = 90
+
+#
+# Enable periodic tasks.
+#
+# If set to true, this option allows services to periodically run
+# tasks on the manager.
+#
+# In case of running multiple schedulers or conductors you may want
+# to run periodic tasks on only one host - in this case disable this
+# option for all hosts but one.
+# (boolean value)
+#periodic_enable = true
+
+#
+# Number of seconds to randomly delay when starting the periodic task
+# scheduler to reduce stampeding.
+#
+# When compute workers are restarted in unison across a cluster,
+# they all end up running the periodic tasks at the same time
+# causing problems for the external services. To mitigate this
+# behavior, periodic_fuzzy_delay option allows you to introduce a
+# random initial delay when starting the periodic task scheduler.
+#
+# Possible Values:
+#
+# * Any positive integer (in seconds)
+# * 0 : disable the random delay
+# (integer value)
+# Minimum value: 0
+#periodic_fuzzy_delay = 60
+
+# List of APIs to be enabled by default. (list value)
+enabled_apis = osapi_compute,metadata
+
+#
+# List of APIs with enabled SSL.
+#
+# Nova provides SSL support for the API servers. The
+# enabled_ssl_apis option allows configuring the SSL support.
+# (list value)
+#enabled_ssl_apis =
+
+#
+# IP address on which the OpenStack API will listen.
+#
+# The OpenStack API service listens on this IP address for incoming
+# requests.
+# (string value)
+#osapi_compute_listen = 0.0.0.0
+
+#
+# Port on which the OpenStack API will listen.
+#
+# The OpenStack API service listens on this port number for incoming
+# requests.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#osapi_compute_listen_port = 8774
+
+#
+# Number of workers for OpenStack API service. The default will be
+# the number of CPUs available.
+#
+# OpenStack API services can be configured to run as multi-process
+# (workers). This overcomes the problem of reduction in throughput
+# when API request concurrency increases. OpenStack API service will
+# run in the specified number of processes.
+#
+# Possible Values:
+#
+# * Any positive integer
+# * None (default value)
+# (integer value)
+# Minimum value: 1
+#osapi_compute_workers = <None>
+
+#
+# IP address on which the metadata API will listen.
+#
+# The metadata API service listens on this IP address for incoming
+# requests.
+# (string value)
+#metadata_listen = 0.0.0.0
+
+#
+# Port on which the metadata API will listen.
+#
+# The metadata API service listens on this port number for incoming
+# requests.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#metadata_listen_port = 8775
+
+#
+# Number of workers for metadata service. If not specified the number
+# of available CPUs will be used.
+#
+# The metadata service can be configured to run as multi-process
+# (workers). This overcomes the problem of reduction in throughput
+# when API request concurrency increases. The metadata service will
+# run in the specified number of processes.
+#
+# Possible Values:
+#
+# * Any positive integer
+# * None (default value)
+# (integer value)
+# Minimum value: 1
+#metadata_workers = <None>
+
+# Full class name for the Manager for network (string value)
+# Possible values:
+# nova.network.manager.FlatManager - <No description provided>
+# nova.network.manager.FlatDHCPManager - <No description provided>
+# nova.network.manager.VlanManager - <No description provided>
+#network_manager = nova.network.manager.VlanManager
+
+#
+# This option specifies the driver to be used for the servicegroup
+# service.
+#
+# ServiceGroup API in nova enables checking status of a compute node.
+# When a compute worker running the nova-compute daemon starts, it
+# calls the join API to join the compute group. Services like nova
+# scheduler can query the ServiceGroup API to check if a node is
+# alive. Internally, the ServiceGroup client driver automatically
+# updates the compute worker status. There are multiple backend
+# implementations for this service: Database ServiceGroup driver and
+# Memcache ServiceGroup driver.
+#
+# Possible Values:
+#
+# * db : Database ServiceGroup driver
+# * mc : Memcache ServiceGroup driver
+#
+# Related Options:
+#
+# * service_down_time (maximum time since last check-in for up
+# service)
+# (string value)
+# Possible values:
+# db - <No description provided>
+# mc - <No description provided>
+#servicegroup_driver = db
+
+#
+# From oslo.service.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should we run
+# them here? (boolean value)
+#run_external_periodic_tasks = true
+
+#
+# From oslo.service.service
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>, and
+# <start>:<end>, where 0 results in listening on a random tcp port
+# number; <port> results in listening on the specified port number
+# (and not enabling backdoor if that port is in use); and
+# <start>:<end> results in listening on the smallest unused port
+# number within the specified range of port numbers. The chosen port
+# is displayed in the service's log file. (string value)
+#backdoor_port = <None>
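+# Illustrative settings for the formats described above (port numbers
+# are examples only):
+# backdoor_port = 0          (listen on a random free port)
+# backdoor_port = 3075       (listen on port 3075 if available)
+# backdoor_port = 3075:3085  (listen on the first unused port in range)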
+
+# Enable eventlet backdoor, using the provided path as a unix socket
+# that can receive connections. This option is mutually exclusive with
+# 'backdoor_port' in that only one should be provided. If both are
+# provided then the existence of this option overrides the usage of
+# that option. (string value)
+#backdoor_socket = <None>
+
+# Enables or disables logging values of all registered options when
+# starting a service (at DEBUG level). (boolean value)
+#log_options = true
+
+# Specify a timeout after which a gracefully shutdown server will
+# exit. Zero value means endless wait. (integer value)
+#graceful_shutdown_timeout = 60
+
+{%- if compute.logging is defined %}
+{%- set _data = compute.logging %}
+{%- include "oslo_templates/files/queens/oslo/_log.conf" %}
+{%- endif %}
+
+{%- set _data = compute.message_queue %}
+{%- include "oslo_templates/files/queens/oslo/_messaging_default.conf" %}
+
+[api]
+#
+# Options under this group are used to define Nova API.
+
+#
+# From nova.conf
+#
+
+#
+# This determines the strategy to use for authentication: keystone or
+# noauth2. 'noauth2' is designed for testing only, as it does no
+# actual credential checking. 'noauth2' provides administrative
+# credentials only if 'admin' is specified as the username.
+# (string value)
+# Possible values:
+# keystone - <No description provided>
+# noauth2 - <No description provided>
+auth_strategy = keystone
+
+#
+# When True, the 'X-Forwarded-For' header is treated as the canonical
+# remote address. When False (the default), the 'remote_address'
+# header is used.
+#
+# You should only enable this if you have an HTML sanitizing proxy.
+# (boolean value)
+#use_forwarded_for = false
+
+#
+# When gathering the existing metadata for a config drive, the
+# EC2-style metadata is returned for all versions that don't appear
+# in this option. As of the Liberty release, the available versions
+# are:
+#
+# * 1.0
+# * 2007-01-19
+# * 2007-03-01
+# * 2007-08-29
+# * 2007-10-10
+# * 2007-12-15
+# * 2008-02-01
+# * 2008-09-01
+# * 2009-04-04
+#
+# The option is in the format of a single string, with each version
+# separated by a space.
+#
+# Possible values:
+#
+# * Any string that represents zero or more versions, separated by
+# spaces.
+# (string value)
+#config_drive_skip_versions = 1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01
+
+#
+# A list of vendordata providers.
+#
+# vendordata providers are how deployers can provide metadata via
+# configdrive and metadata that is specific to their deployment.
+# There are currently two supported providers: StaticJSON and
+# DynamicJSON.
+#
+# StaticJSON reads a JSON file configured by the flag
+# vendordata_jsonfile_path and places the JSON from that file into
+# vendor_data.json and vendor_data2.json.
+#
+# DynamicJSON is configured via the vendordata_dynamic_targets flag,
+# which is documented separately. For each of the endpoints specified
+# in that flag, a section is added to the vendor_data2.json.
+#
+# For more information on the requirements for implementing a
+# vendordata dynamic endpoint, please see the vendordata.rst file in
+# the nova developer reference.
+#
+# Possible values:
+#
+# * A list of vendordata providers, with StaticJSON and DynamicJSON
+# being current options.
+#
+# Related options:
+#
+# * vendordata_dynamic_targets
+# * vendordata_dynamic_ssl_certfile
+# * vendordata_dynamic_connect_timeout
+# * vendordata_dynamic_read_timeout
+# * vendordata_dynamic_failure_fatal
+# (list value)
+#vendordata_providers = StaticJSON
+
+#
+# A list of targets for the dynamic vendordata provider. These
+# targets are of the form <name>@<url>.
+#
+# The dynamic vendordata provider collects metadata by contacting
+# external REST services and querying them for information about the
+# instance. This behaviour is documented in the vendordata.rst file
+# in the nova developer reference.
+# (list value)
+#vendordata_dynamic_targets =
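+# Illustrative value only; the name and URL below are placeholders in
+# the documented <name>@<url> form:
+# vendordata_dynamic_targets = testing@http://127.0.0.1:9312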
+
+#
+# Path to an optional certificate file or CA bundle to verify dynamic
+# vendordata REST services ssl certificates against.
+#
+# Possible values:
+#
+# * An empty string, or a path to a valid certificate file
+#
+# Related options:
+#
+# * vendordata_providers
+# * vendordata_dynamic_targets
+# * vendordata_dynamic_connect_timeout
+# * vendordata_dynamic_read_timeout
+# * vendordata_dynamic_failure_fatal
+# (string value)
+#vendordata_dynamic_ssl_certfile =
+
+#
+# Maximum wait time for an external REST service to connect.
+#
+# Possible values:
+#
+# * Any integer with a value greater than three (the TCP packet
+# retransmission timeout). Note that instance start may be blocked
+# during this wait time, so this value should be kept small.
+#
+# Related options:
+#
+# * vendordata_providers
+# * vendordata_dynamic_targets
+# * vendordata_dynamic_ssl_certfile
+# * vendordata_dynamic_read_timeout
+# * vendordata_dynamic_failure_fatal
+# (integer value)
+# Minimum value: 3
+#vendordata_dynamic_connect_timeout = 5
+
+#
+# Maximum wait time for an external REST service to return data once
+# connected.
+#
+# Possible values:
+#
+# * Any integer. Note that instance start is blocked during this wait
+# time, so this value should be kept small.
+#
+# Related options:
+#
+# * vendordata_providers
+# * vendordata_dynamic_targets
+# * vendordata_dynamic_ssl_certfile
+# * vendordata_dynamic_connect_timeout
+# * vendordata_dynamic_failure_fatal
+# (integer value)
+# Minimum value: 0
+#vendordata_dynamic_read_timeout = 5
+
+#
+# Should failures to fetch dynamic vendordata be fatal to instance
+# boot?
+#
+# Related options:
+#
+# * vendordata_providers
+# * vendordata_dynamic_targets
+# * vendordata_dynamic_ssl_certfile
+# * vendordata_dynamic_connect_timeout
+# * vendordata_dynamic_read_timeout
+# (boolean value)
+#vendordata_dynamic_failure_fatal = false
+
+#
+# This option is the time (in seconds) to cache metadata. When set to
+# 0, metadata caching is disabled entirely; this is generally not
+# recommended for performance reasons. Increasing this setting should
+# improve response times of the metadata API when under heavy load.
+# Higher values may increase memory usage, and result in longer times
+# for host metadata changes to take effect.
+# (integer value)
+# Minimum value: 0
+#metadata_cache_expiration = 15
+
+#
+# Cloud providers may store custom data in vendor data file that will
+# then be available to the instances via the metadata service, and to
+# the rendering of config-drive. The default class for this,
+# JsonFileVendorData, loads this information from a JSON file, whose
+# path is configured by this option. If there is no path set by this
+# option, the class returns an empty dictionary.
+#
+# Possible values:
+#
+# * Any string representing the path to the data file, or an empty
+# string (default).
+# (string value)
+#vendordata_jsonfile_path = <None>
+
+#
+# As a query can potentially return many thousands of items, you can
+# limit the maximum number of items in a single response by setting
+# this option.
+# (integer value)
+# Minimum value: 0
+# Deprecated group/name - [DEFAULT]/osapi_max_limit
+#max_limit = 1000
+
+#
+# This string is prepended to the normal URL that is returned in
+# links to the OpenStack Compute API. If it is empty (the default),
+# the URLs are returned unchanged.
+#
+# Possible values:
+#
+# * Any string, including an empty string (the default).
+# (string value)
+# Deprecated group/name - [DEFAULT]/osapi_compute_link_prefix
+#compute_link_prefix = <None>
+
+#
+# This string is prepended to the normal URL that is returned in
+# links to Glance resources. If it is empty (the default), the URLs
+# are returned unchanged.
+#
+# Possible values:
+#
+# * Any string, including an empty string (the default).
+# (string value)
+# Deprecated group/name - [DEFAULT]/osapi_glance_link_prefix
+#glance_link_prefix = <None>
+
+# DEPRECATED:
+# Operators can turn off the ability for a user to take snapshots of
+# their instances by setting this option to False. When disabled, any
+# attempt to take a snapshot will result in an HTTP 400 response
+# ("Bad Request").
+# (boolean value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: This option disables the createImage server action API in a
+# non-discoverable way and is thus a barrier to interoperability.
+# Also, it is not used for other APIs that create snapshots like
+# shelve or createBackup. Disabling snapshots should be done via
+# policy if so desired.
+#allow_instance_snapshots = true
+
+# DEPRECATED:
+# This option is a list of all instance states for which network
+# address information should not be returned from the API.
+#
+# Possible values:
+#
+# A list of strings, where each string is a valid VM state, as
+# defined in nova/compute/vm_states.py. As of the Newton release,
+# they are:
+#
+# * "active"
+# * "building"
+# * "paused"
+# * "suspended"
+# * "stopped"
+# * "rescued"
+# * "resized"
+# * "soft-delete"
+# * "deleted"
+# * "error"
+# * "shelved"
+# * "shelved_offloaded"
+# (list value)
+# Deprecated group/name - [DEFAULT]/osapi_hide_server_address_states
+# This option is deprecated for removal since 17.0.0.
+# Its value may be silently ignored in the future.
+# Reason: This option hides the server address in the server
+# representation for the configured server states, which makes the
+# GET server API behavior controlled by this config option. As a
+# result, users cannot discover the API behavior on different clouds,
+# which leads to interoperability issues.
+#hide_server_address_states = building
+
+# The full path to the fping binary. (string value)
+#fping_path = /usr/sbin/fping
+
+#
+# When True, the TenantNetworkController will query the Neutron API
+# to get the default networks to use.
+#
+# Related options:
+#
+# * neutron_default_tenant_id
+# (boolean value)
+#use_neutron_default_nets = false
+
+#
+# Tenant ID for getting the default network from Neutron API (also
+# referred to in some places as the 'project ID') to use.
+#
+# Related options:
+#
+# * use_neutron_default_nets
+# (string value)
+#neutron_default_tenant_id = default
+
+#
+# Enables returning of the instance password by the relevant server
+# API calls such as create, rebuild, evacuate, or rescue. If the
+# hypervisor does not support password injection, then the password
+# returned will not be correct, so if your hypervisor does not
+# support password injection, set this to False.
+# (boolean value)
+#enable_instance_password = true
+
+
+[api_database]
+#
+# The *Nova API Database* is a separate database which is used for
+# information shared across *cells*. This database is mandatory since
+# the Mitaka release (13.0.0).
+
+#
+# From nova.conf
+#
+
+# The SQLAlchemy connection string to use to connect to the database.
+# (string value)
+#connection = <None>
+connection=sqlite:////var/lib/nova/nova.sqlite
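+# For reference, a MySQL-backed deployment would use a SQLAlchemy URL
+# of the following form (placeholder credentials and host):
+# connection = mysql+pymysql://nova:secret@dbhost/nova_api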
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous = true
+
+# The SQLAlchemy connection string to use to connect to the slave
+# database. (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including
+# the default, overrides any server-set SQL mode. To use whatever SQL
+# mode is set by the server configuration, set this to no value.
+# Example: mysql_sql_mode= (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# Connections which have been present in the connection pool longer
+# than this number of seconds will be replaced with a new one the next
+# time they are checked out from the pool. (integer value)
+# Deprecated group/name - [api_database]/idle_timeout
+#connection_recycle_time = 3600
+
+# Maximum number of SQL connections to keep open in a pool. Setting a
+# value of 0 indicates no limit. (integer value)
+#max_pool_size = <None>
+
+# Maximum number of database connection retries during startup. Set to
+# -1 to specify an infinite retry count. (integer value)
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection. (integer
+# value)
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer
+# value)
+#max_overflow = <None>
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything.
+# (integer value)
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer
+# value)
+#pool_timeout = <None>
+
+{%- if compute.get('barbican', {}).get('enabled', False) %}
+{%- set _data = compute.identity %}
+[barbican]
+{%- include "oslo_templates/files/queens/castellan/_barbican.conf" %}
+{%- endif %}
+
+[cache]
+
+#
+# From nova.conf
+#
+{%- if compute.cache is defined %}
+backend = oslo_cache.memcache_pool
+enabled = true
+memcache_servers={%- for member in compute.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
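+# For reference, the cache block above renders to something like the
+# following, given two hypothetical cache members:
+# memcache_servers=10.10.10.1:11211,10.10.10.2:11211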
+
+# Prefix for building the configuration dictionary for the cache
+# region. This should not need to be changed unless there is another
+# dogpile.cache region with the same configuration name. (string
+# value)
+#config_prefix = cache.oslo
+
+# Default TTL, in seconds, for any cached item in the dogpile.cache
+# region. This applies to any cached method that doesn't have an
+# explicit cache expiration time defined for it. (integer value)
+#expiration_time = 600
+
+# Cache backend module. For eventlet-based or environments with
+# hundreds of threaded servers, Memcache with pooling
+# (oslo_cache.memcache_pool) is recommended. For environments with
+# less than 100 threaded servers, Memcached (dogpile.cache.memcached)
+# or Redis (dogpile.cache.redis) is recommended. Test environments
+# with a single instance of the server can use the
+# dogpile.cache.memory backend. (string value)
+# Possible values:
+# oslo_cache.memcache_pool - <No description provided>
+# oslo_cache.dict - <No description provided>
+# oslo_cache.mongo - <No description provided>
+# oslo_cache.etcd3gw - <No description provided>
+# dogpile.cache.memcached - <No description provided>
+# dogpile.cache.pylibmc - <No description provided>
+# dogpile.cache.bmemcached - <No description provided>
+# dogpile.cache.dbm - <No description provided>
+# dogpile.cache.redis - <No description provided>
+# dogpile.cache.memory - <No description provided>
+# dogpile.cache.memory_pickle - <No description provided>
+# dogpile.cache.null - <No description provided>
+#backend = dogpile.cache.null
+
+# Arguments supplied to the backend module. Specify this option once
+# per argument to be passed to the dogpile.cache backend. Example
+# format: "<argname>:<value>". (multi valued)
+#backend_argument =
+
+# Proxy classes to import that will affect the way the dogpile.cache
+# backend functions. See the dogpile.cache documentation on changing-
+# backend-behavior. (list value)
+#proxies =
+
+# Global toggle for caching. (boolean value)
+#enabled = false
+
+# Extra debugging from the cache backend (cache keys,
+# get/set/delete/etc calls). This is only really useful if you need to
+# see the specific cache-backend get/set/delete calls with the
+# keys/values. Typically this should be left set to false. (boolean
+# value)
+#debug_cache_backend = false
+
+# Memcache servers in the format of "host:port".
+# (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
+# (list value)
+#memcache_servers = localhost:11211
+
+# Number of seconds memcached server is considered dead before it is
+# tried again. (dogpile.cache.memcache and oslo_cache.memcache_pool
+# backends only). (integer value)
+#memcache_dead_retry = 300
+
+# Timeout in seconds for every call to a server.
+# (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
+# (integer value)
+#memcache_socket_timeout = 3
+
+# Max total number of open connections to every memcached server.
+# (oslo_cache.memcache_pool backend only). (integer value)
+#memcache_pool_maxsize = 10
+
+# Number of seconds a connection to memcached is held unused in the
+# pool before it is closed. (oslo_cache.memcache_pool backend only).
+# (integer value)
+#memcache_pool_unused_timeout = 60
+
+# Number of seconds that an operation will wait to get a memcache
+# client connection. (integer value)
+#memcache_pool_connection_get_timeout = 10
+
+
+[cells]
+#
+# DEPRECATED: Cells options allow you to use cells v1 functionality
+# in an OpenStack deployment.
+#
+# Note that the options in this group are only for cells v1
+# functionality, which is considered experimental and not recommended
+# for new deployments. Cells v1 is being replaced with cells v2,
+# which starting in the 15.0.0 Ocata release is required, and all
+# Nova deployments will be at least a cells v2 cell of one.
+#
+
+#
+# From nova.conf
+#
+
+# DEPRECATED:
+# Enable cell v1 functionality.
+#
+# Note that cells v1 is considered experimental and not recommended
+# for new Nova deployments. Cells v1 is being replaced by cells v2,
+# which starting in the 15.0.0 Ocata release is required; all Nova
+# deployments are at least a cells v2 cell of one. Setting this
+# option, or any other options in the [cells] group, is not required
+# for cells v2.
+#
+# When this functionality is enabled, it lets you scale an OpenStack
+# Compute cloud in a more distributed fashion without having to use
+# complicated technologies like database and message queue
+# clustering. Cells are configured as a tree. The top-level cell
+# should have a host that runs a nova-api service, but no
+# nova-compute services. Each child cell should run all of the
+# typical nova-* services in a regular Compute cloud except for
+# nova-api. You can think of cells as a normal Compute deployment in
+# that each cell has its own database server and message queue
+# broker.
+#
+# Related options:
+#
+# * name: A unique cell name must be given when this functionality
+# is enabled.
+# * cell_type: Cell type should be defined for all cells.
+# (boolean value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#enable = false
+
+# DEPRECATED:
+# Name of the current cell.
+#
+# This value must be unique for each cell. Name of a cell is used as
+# its id; leaving this option unset or setting the same name for
+# two or more cells may cause unexpected behaviour.
+#
+# Related options:
+#
+# * enabled: This option is meaningful only when cells service
+# is enabled
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#name = nova
+
+# DEPRECATED:
+# Cell capabilities.
+#
+# List of arbitrary key=value pairs defining capabilities of the
+# current cell to be sent to the parent cells. These capabilities
+# are intended to be used in cells scheduler filters/weighers.
+#
+# Possible values:
+#
+# * key=value pairs list, for example:
+# ``hypervisor=xenserver;kvm,os=linux;windows``
+# (list value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#capabilities = hypervisor=xenserver;kvm,os=linux;windows
+
+# DEPRECATED:
+# Call timeout.
+#
+# Cell messaging module waits for response(s) to be put into the
+# eventlet queue. This option defines the seconds waited for
+# response from a call to a cell.
+#
+# Possible values:
+#
+# * An integer, corresponding to the interval time in seconds.
+# (integer value)
+# Minimum value: 0
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#call_timeout = 60
+
+# DEPRECATED:
+# Reserve percentage
+#
+# Percentage of cell capacity to hold in reserve, so the minimum
+# amount of free resource is considered to be:
+#
+# min_free = total * (reserve_percent / 100.0)
+#
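+# Worked example (illustration only): with reserve_percent = 10.0 and
+# 64 GiB of RAM in a cell, min_free = 64 * (10.0 / 100.0) = 6.4 GiB.
+#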
+# This option affects both memory and disk utilization.
+#
+# The primary purpose of this reserve is to ensure some space is
+# available for users who want to resize their instance to be larger.
+# Note that currently once the capacity expands into this reserve
+# space this option is ignored.
+#
+# Possible values:
+#
+# * An integer or float, corresponding to the percentage of cell
+# capacity to be held in reserve.
+# (floating point value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#reserve_percent = 10.0
+
+# DEPRECATED:
+# Type of cell.
+#
+# When cells feature is enabled the hosts in the OpenStack Compute
+# cloud are partitioned into groups. Cells are configured as a tree.
+# The top-level cell's cell_type must be set to ``api``. All other
+# cells are defined as a ``compute cell`` by default.
+#
+# Related option:
+#
+# * quota_driver: Disable quota checking for the child cells.
+# (nova.quota.NoopQuotaDriver)
+# (string value)
+# Possible values:
+# api - <No description provided>
+# compute - <No description provided>
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#cell_type = compute
+
+# DEPRECATED:
+# Mute child interval.
+#
+# Number of seconds after which a lack of capability and capacity
+# update signals that the child cell is to be treated as a mute cell.
+# The child cell will then be weighted so that it is strongly
+# recommended to be skipped.
+#
+# Possible values:
+#
+# * An integer, corresponding to the interval time in seconds.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#mute_child_interval = 300
+
+# DEPRECATED:
+# Bandwidth update interval.
+#
+# Seconds between bandwidth usage cache updates for cells.
+#
+# Possible values:
+#
+# * An integer, corresponding to the interval time in seconds.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#bandwidth_update_interval = 600
+
+# DEPRECATED:
+# Instance update sync database limit.
+#
+# Number of instances to pull from the database at one time for
+# a sync. If there are more instances to update the results will
+# be paged through.
+#
+# Possible values:
+#
+# * An integer, corresponding to a number of instances.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#instance_update_sync_database_limit = 100
+
+# DEPRECATED:
+# Mute weight multiplier.
+#
+# Multiplier used to weigh mute children. Mute children cells are
+# recommended to be skipped so their weight is multiplied by this
+# negative value.
+#
+# Possible values:
+#
+# * A negative number
+# (floating point value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#mute_weight_multiplier = -10000.0
+
+# DEPRECATED:
+# Ram weight multiplier.
+#
+# Multiplier used for weighing ram. Negative numbers indicate that
+# Compute should stack VMs on one host instead of spreading out new
+# VMs to more hosts in the cell.
+#
+# Possible values:
+#
+# * Numeric multiplier
+# (floating point value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#ram_weight_multiplier = 10.0
+
+# DEPRECATED:
+# Offset weight multiplier
+#
+# Multiplier used to weigh offset weigher. Cells with higher
+# weight_offsets in the DB will be preferred. The weight_offset
+# is a property of a cell stored in the database. It can be used
+# by a deployer to have scheduling decisions favor or disfavor
+# cells based on the setting.
+#
+# Possible values:
+#
+# * Numeric multiplier
+# (floating point value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#offset_weight_multiplier = 1.0
+
+# DEPRECATED:
+# Instance updated at threshold
+#
+# Number of seconds after an instance was updated or deleted to
+# continue to update cells. This option lets the cells manager sync
+# only instances that have been updated recently; i.e., a threshold
+# of 3600 means to only update instances that were modified in the
+# last hour.
+#
+# Possible values:
+#
+# * Threshold in seconds
+#
+# Related options:
+#
+# * This value is used with the ``instance_update_num_instances``
+# value in a periodic task run.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#instance_updated_at_threshold = 3600
+
+# DEPRECATED:
+# Instance update num instances
+#
+# On every run of the periodic task, the nova cells manager will
+# attempt to sync instance_update_num_instances number of instances.
+# When the manager gets the list of instances, it shuffles them so
+# that multiple nova-cells services do not attempt to sync the same
+# instances in lockstep.
+#
+# Possible values:
+#
+# * Positive integer number
+#
+# Related options:
+#
+# * This value is used with the ``instance_updated_at_threshold``
+# value in a periodic task run.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#instance_update_num_instances = 1
+
+# DEPRECATED:
+# Maximum hop count
+#
+# When processing a targeted message, if the local cell is not the
+# target, a route is defined between neighbouring cells. And the
+# message is processed across the whole routing path. This option
+# defines the maximum hop counts until reaching the target.
+#
+# Possible values:
+#
+# * Positive integer value
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#max_hop_count = 10
+
+# DEPRECATED:
+# Cells scheduler.
+#
+# The class of the driver used by the cells scheduler. This should be
+# the full Python path to the class to be used. If nothing is
+# specified in this option, the CellsScheduler is used.
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#scheduler = nova.cells.scheduler.CellsScheduler
+
+# DEPRECATED:
+# RPC driver queue base.
+#
+# When sending a message to another cell by JSON-ifying the message
+# and making an RPC cast to 'process_message', a base queue is used.
+# This option defines the base queue name to be used when
+# communicating between cells. Various topics by message type will be
+# appended to this.
+#
+# Possible values:
+#
+# * The base queue name to be used when communicating between cells.
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#rpc_driver_queue_base = cells.intercell
+
+# DEPRECATED:
+# Scheduler filter classes.
+#
+# Filter classes the cells scheduler should use. An entry of
+# "nova.cells.filters.all_filters" maps to all cells filters
+# included with nova. As of the Mitaka release the following
+# filter classes are available:
+#
+# Different cell filter: A scheduler hint of 'different_cell'
+# with a value of a full cell name may be specified to route
+# a build away from a particular cell.
+#
+# Image properties filter: Image metadata named
+# 'hypervisor_version_requires' with a version specification
+# may be specified to ensure the build goes to a cell which
+# has hypervisors of the required version. If either the version
+# requirement on the image or the hypervisor capability of the
+# cell is not present, this filter returns without filtering out
+# the cells.
+#
+# Target cell filter: A scheduler hint of 'target_cell' with a
+# value of a full cell name may be specified to route a build to
+# a particular cell. No error handling is done as there's no way
+# to know whether the full path is valid.
+#
+# As an admin user, you can also add a filter that directs builds
+# to a particular cell.
+#
+# (list value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#scheduler_filter_classes = nova.cells.filters.all_filters
+
+# DEPRECATED:
+# Scheduler weight classes.
+#
+# Weigher classes the cells scheduler should use. An entry of
+# "nova.cells.weights.all_weighers" maps to all cell weighers
+# included with nova. As of the Mitaka release the following
+# weight classes are available:
+#
+# mute_child: Downgrades the likelihood of child cells being
+# chosen for scheduling requests, which haven't sent capacity
+# or capability updates in a while. Options include
+# mute_weight_multiplier (multiplier for mute children; value
+# should be negative).
+#
+# ram_by_instance_type: Select cells with the most RAM capacity
+# for the instance type being requested. Because higher weights
+# win, Compute returns the number of available units for the
+# instance type requested. The ram_weight_multiplier option defaults
+# to 10.0, which scales the weight by a factor of 10. Use a negative
+# number to stack VMs on one host instead of spreading out new VMs
+# to more hosts in the cell.
+#
+# weight_offset: Allows modifying the database to weight a particular
+# cell. The highest weight will be the first cell to be scheduled for
+# launching an instance. When the weight_offset of a cell is set to 0,
+# it is unlikely to be picked, but it could be picked if other cells
+# have a lower weight, for example if they're full. And when the
+# weight_offset is set to a very high value (for example,
+# '999999999999999'), it is likely to be picked if no other cell has
+# a higher weight.
+# (list value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#scheduler_weight_classes = nova.cells.weights.all_weighers
+
+# DEPRECATED:
+# Scheduler retries.
+#
+# How many retries when no cells are available. Specifies how many
+# times the scheduler tries to launch a new instance when no cells
+# are available.
+#
+# Possible values:
+#
+# * Positive integer value
+#
+# Related options:
+#
+# * This value is used with the ``scheduler_retry_delay`` value
+# while retrying to find a suitable cell.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#scheduler_retries = 10
+
+# DEPRECATED:
+# Scheduler retry delay.
+#
+# Specifies the delay (in seconds) between scheduling retries when no
+# cell can be found to place the new instance on. If the instance
+# cannot be scheduled to a cell after ``scheduler_retries`` attempts,
+# in combination with ``scheduler_retry_delay``, the scheduling of
+# the instance fails.
+#
+# Possible values:
+#
+# * Time in seconds.
+#
+# Related options:
+#
+# * This value is used with the ``scheduler_retries`` value
+# while retrying to find a suitable cell.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#scheduler_retry_delay = 2
+
+# DEPRECATED:
+# DB check interval.
+#
+# Cell state manager updates cell status for all cells from the DB
+# only after this particular interval time is passed. Otherwise
+# cached statuses are used. If this value is 0 or negative, all cell
+# statuses are updated from the DB whenever a state is needed.
+#
+# Possible values:
+#
+# * Interval time, in seconds.
+#
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#db_check_interval = 60
+
+# DEPRECATED:
+# Optional cells configuration.
+#
+# Configuration file from which to read cells configuration. If given,
+# overrides reading cells from the database.
+#
+# Cells store all inter-cell communication data, including user names
+# and passwords, in the database. Because the cells data is not
+# updated very frequently, use this option to specify a JSON file to
+# store cells data. With this configuration, the database is no
+# longer consulted when reloading the cells data. The file must have
+# columns present in the Cell model (excluding common database fields
+# and the id column). You must specify the queue connection
+# information through a transport_url field, instead of username,
+# password, and so on.
+#
+# The transport_url has the following form:
+# rabbit://USERNAME:PASSWORD@HOSTNAME:PORT/VIRTUAL_HOST
+#
+# Possible values:
+#
+# The scheme can be either qpid or rabbit; the following sample shows
+# this optional configuration:
+#
+# {
+# "parent": {
+# "name": "parent",
+# "api_url": "http://api.example.com:8774",
+# "transport_url": "rabbit://rabbit.example.com",
+# "weight_offset": 0.0,
+# "weight_scale": 1.0,
+# "is_parent": true
+# },
+# "cell1": {
+# "name": "cell1",
+# "api_url": "http://api.example.com:8774",
+# "transport_url": "rabbit://rabbit1.example.com",
+# "weight_offset": 0.0,
+# "weight_scale": 1.0,
+# "is_parent": false
+# },
+# "cell2": {
+# "name": "cell2",
+# "api_url": "http://api.example.com:8774",
+# "transport_url": "rabbit://rabbit2.example.com",
+# "weight_offset": 0.0,
+# "weight_scale": 1.0,
+# "is_parent": false
+# }
+# }
+#
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#cells_config = <None>
+
+
+[cinder]
+
+#
+# From nova.conf
+#
+os_region_name = {{ compute.identity.region }}
+catalog_info=volumev3:cinderv3:internalURL
+{%- if compute.image.get('protocol', 'http') == 'https' %}
+cafile={{ compute.identity.get('cacert_file', compute.cacert_file) }}
+{%- endif %}
+
+#
+# Info to match when looking for cinder in the service catalog.
+#
+# Possible values:
+#
+# * Format is separated values of the form:
+# <service_type>:<service_name>:<endpoint_type>
+#
+# Note: Nova does not support the Cinder v2 API since the Nova 17.0.0
+# Queens release.
+#
+# Related options:
+#
+# * endpoint_template - Setting this option will override catalog_info
+# (string value)
+#catalog_info = volumev3:cinderv3:publicURL
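+# For example, a deployment selecting the internal endpoint (as the
+# value rendered above does) would use:
+# catalog_info = volumev3:cinderv3:internalURL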
+
+#
+# If this option is set then it will override service catalog lookup
+# with this template for cinder endpoint.
+#
+# Possible values:
+#
+# * URL for cinder endpoint API
+# e.g. http://localhost:8776/v3/%(project_id)s
+#
+# Note: Nova does not support the Cinder v2 API since the Nova 17.0.0
+# Queens release.
+#
+# Related options:
+#
+# * catalog_info - If endpoint_template is not set, catalog_info will
+# be used.
+# (string value)
+#endpoint_template = <None>
+
+#
+# Region name of this node. This is used when picking the URL in the
+# service catalog.
+#
+# Possible values:
+#
+# * Any string representing region name
+# (string value)
+#os_region_name = <None>
+
+#
+# Number of times cinderclient should retry on any failed http call.
+# 0 means connection is attempted only once. Setting it to any
+# positive integer means that on failure connection is retried that
+# many times e.g. setting it to 3 means total attempts to connect
+# will be 4.
+#
+# Possible values:
+#
+# * Any integer value. 0 means connection is attempted only once
+# (integer value)
+# Minimum value: 0
+#http_retries = 3
+
+#
+# Allow attach between instance and volume in different availability
+# zones.
+#
+# If False, volumes attached to an instance must be in the same
+# availability zone in Cinder as the instance availability zone in
+# Nova. This also means care should be taken when booting an instance
+# from a volume where source is not "volume" because Nova will
+# attempt to create a volume using the same availability zone as what
+# is assigned to the instance. If that AZ is not in Cinder (or
+# allow_availability_zone_fallback=False in cinder.conf), the volume
+# create request will fail and the instance will fail the build
+# request. By default there is no availability zone restriction on
+# volume attach.
+# (boolean value)
+#cross_az_attach = true
+{%- if compute.cross_az_attach is defined %}
+cross_az_attach={{ compute.cross_az_attach }}
+{%- endif %}
+
+# PEM encoded Certificate Authority to use when verifying HTTPs
+# connections. (string value)
+#cafile = <None>
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Timeout value for http requests (integer value)
+#timeout = <None>
+
+# Authentication type to load (string value)
+# Deprecated group/name - [cinder]/auth_plugin
+#auth_type = <None>
+
+# Config Section from which to load plugin specific options (string
+# value)
+#auth_section = <None>
+
+# Authentication URL (string value)
+#auth_url = <None>
+
+# Scope for system operations (string value)
+#system_scope = <None>
+
+# Domain ID to scope to (string value)
+#domain_id = <None>
+
+# Domain name to scope to (string value)
+#domain_name = <None>
+
+# Project ID to scope to (string value)
+#project_id = <None>
+
+# Project name to scope to (string value)
+#project_name = <None>
+
+# Domain ID containing project (string value)
+#project_domain_id = <None>
+
+# Domain name containing project (string value)
+#project_domain_name = <None>
+
+# Trust ID (string value)
+#trust_id = <None>
+
+# Optional domain ID to use with v3 and v2 parameters. It will be used
+# for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_id = <None>
+
+# Optional domain name to use with v3 API and v2 parameters. It will
+# be used for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_name = <None>
+
+# User ID (string value)
+#user_id = <None>
+
+# Username (string value)
+# Deprecated group/name - [cinder]/user_name
+#username = <None>
+
+# User's domain id (string value)
+#user_domain_id = <None>
+
+# User's domain name (string value)
+#user_domain_name = <None>
+
+# User's password (string value)
+#password = <None>
+
+# Tenant ID (string value)
+#tenant_id = <None>
+
+# Tenant Name (string value)
+#tenant_name = <None>
+
+
+[compute]
+
+#
+# From nova.conf
+#
+
+#
+# Number of consecutive failed builds that result in disabling a
+# compute service.
+#
+# This option will cause nova-compute to set itself to a disabled
+# state
+# if a certain number of consecutive build failures occur. This will
+# prevent the scheduler from continuing to send builds to a compute
+# node that is
+# consistently failing. Note that all failures qualify and count
+# towards this
+# score, including reschedules that may have been due to racy
+# scheduler behavior.
+# Since the failures must be consecutive, it is unlikely that
+# occasional expected
+# reschedules will actually disable a compute node.
+#
+# Possible values:
+#
+# * Any positive integer representing a build failure count.
+# * Zero to never auto-disable.
+# (integer value)
+#consecutive_build_service_disable_threshold = 10
+{%- if compute.get('compute', {}).consecutive_build_service_disable_threshold is defined %}
+consecutive_build_service_disable_threshold = {{ compute.compute.consecutive_build_service_disable_threshold }}
+{%- endif %}
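+# The conditional above is rendered only when a pillar value such as
+# nova:compute:compute:consecutive_build_service_disable_threshold
+# (path shown for illustration) is defined; otherwise the commented
+# upstream default of 10 applies.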
+
+#
+# Interval for updating nova-compute-side cache of the compute node
+# resource provider's aggregates and traits info.
+#
+# This option specifies the number of seconds between attempts to
+# update a provider's aggregates and traits information in the local
+# cache of the compute node.
+#
+# Possible values:
+#
+# * Any positive integer in seconds.
+# (integer value)
+# Minimum value: 1
+#resource_provider_association_refresh = 300
+
+
+[conductor]
+#
+# Options under this group are used to define Conductor's
+# communication, which manager should act as a proxy between
+# computes and database, and finally, how many worker processes will
+# be used.
+
+#
+# From nova.conf
+#
+
+# DEPRECATED:
+# Topic exchange name on which conductor nodes listen.
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# There is no need to let users choose the RPC topic for all services
+# - there is little gain from this. Furthermore, it makes it really
+# easy to break Nova by using this option.
+#topic = conductor
+
+#
+# Number of workers for OpenStack Conductor service. The default will
+# be the number of CPUs available.
+# (integer value)
+#workers = <None>
+
+
+[console]
+#
+# Options under this group allow tuning the configuration of the
+# console proxy service.
+#
+# Note: the configuration of every compute service includes a
+# ``console_host`` option, which allows selecting the console proxy
+# service to connect to.
+
+#
+# From nova.conf
+#
+
+#
+# Adds list of allowed origins to the console websocket proxy to
+# allow connections from other origin hostnames.
+# Websocket proxy matches the host header with the origin header to
+# prevent cross-site requests. This list specifies the values, other
+# than the host, that are allowed in the origin header.
+#
+# Possible values:
+#
+# * A list where each element is an allowed origin hostname, else an
+# empty list
+# (list value)
+# Deprecated group/name - [DEFAULT]/console_allowed_origins
+#allowed_origins =
+
+
+[consoleauth]
+
+#
+# From nova.conf
+#
+
+#
+# The lifetime of a console auth token (in seconds).
+#
+# A console auth token is used in authorizing console access for a
+# user. Once the auth token time to live count has elapsed, the token
+# is considered expired. Expired tokens are then deleted.
+# (integer value)
+# Minimum value: 0
+# Deprecated group/name - [DEFAULT]/console_token_ttl
+#token_ttl = 600
+
+
+[crypto]
+
+#
+# From nova.conf
+#
+
+#
+# Filename of root CA (Certificate Authority). This is a container
+# format and includes root certificates.
+#
+# Possible values:
+#
+# * Any file name containing root CA, cacert.pem is default
+#
+# Related options:
+#
+# * ca_path
+# (string value)
+#ca_file = cacert.pem
+
+#
+# Filename of a private key.
+#
+# Related options:
+#
+# * keys_path
+# (string value)
+#key_file = private/cakey.pem
+
+#
+# Filename of root Certificate Revocation List (CRL). This is a list
+# of certificates that have been revoked, and therefore, entities
+# presenting those (revoked) certificates should no longer be
+# trusted.
+#
+# Related options:
+#
+# * ca_path
+# (string value)
+#crl_file = crl.pem
+
+#
+# Directory path where keys are located.
+#
+# Related options:
+#
+# * key_file
+# (string value)
+#keys_path = $state_path/keys
+
+#
+# Directory path where root CA is located.
+#
+# Related options:
+#
+# * ca_file
+# (string value)
+#ca_path = $state_path/CA
+
+# Option to enable/disable use of CA for each project. (boolean value)
+#use_project_ca = false
+
+#
+# Subject for certificate for users, %s for
+# project, user, timestamp
+# (string value)
+#user_cert_subject = /C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s
+
+#
+# Subject for certificate for projects, %s for
+# project, timestamp
+# (string value)
+#project_cert_subject = /C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s
+
+
+[devices]
+
+#
+# From nova.conf
+#
+
+#
+# A list of the vGPU types enabled in the compute node.
+#
+# Some pGPUs (e.g. NVIDIA GRID K1) support different vGPU types.
+# Users can use this option to specify a list of enabled vGPU types
+# that may be assigned to a guest instance. But please note that Nova
+# only supports a single type in the Queens release. If more than one
+# vGPU type is specified (as a comma-separated list), only the first
+# one will be used. For example:
+# [devices]
+# enabled_vgpu_types = GRID K100,Intel GVT-g,MxGPU.2,nvidia-11
+# (list value)
+#enabled_vgpu_types =
+
+
+[ephemeral_storage_encryption]
+
+#
+# From nova.conf
+#
+
+#
+# Enables/disables LVM ephemeral storage encryption.
+# (boolean value)
+#enabled = false
+
+#
+# Cipher-mode string to be used.
+#
+# The cipher and mode to be used to encrypt ephemeral storage. The
+# set of cipher-mode combinations available depends on kernel
+# support. According to the dm-crypt documentation, the cipher is
+# expected to be in the format: "<cipher>-<chainmode>-<ivmode>".
+#
+# Possible values:
+#
+# * Any crypto option listed in ``/proc/crypto``.
+# (string value)
+#cipher = aes-xts-plain64
+
+#
+# Encryption key length in bits.
+#
+# The bit length of the encryption key to be used to encrypt
+# ephemeral storage. In XTS mode only half of the bits are used for
+# the encryption key.
+# (integer value)
+# Minimum value: 1
+#key_size = 512
+
+
+[filter_scheduler]
+
+#
+# From nova.conf
+#
+
+#
+# Size of subset of best hosts selected by scheduler.
+#
+# New instances will be scheduled on a host chosen randomly from a
+# subset of the N best hosts, where N is the value set by this
+# option.
+#
+# Setting this to a value greater than 1 will reduce the chance that
+# multiple scheduler processes handling similar requests will select
+# the same host, creating a potential race condition. By selecting a
+# host randomly from the N hosts that best fit the request, the
+# chance of a conflict is reduced. However, the higher you set this
+# value, the less optimal the chosen host may be for a given request.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect.
+#
+# Possible values:
+#
+# * An integer, where the integer corresponds to the size of a host
+# subset. Any integer is valid, although any value less than 1 will
+# be treated as 1
+# (integer value)
+# Minimum value: 1
+# Deprecated group/name - [DEFAULT]/scheduler_host_subset_size
+#host_subset_size = 1
+
+#
+# The number of instances that can be actively performing IO on a
+# host.
+#
+# Instances performing IO include those in the following states:
+# build, resize, snapshot, migrate, rescue, unshelve.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect. Also
+# note that this setting only affects scheduling if the
+# 'io_ops_filter' filter is enabled.
+#
+# Possible values:
+#
+# * An integer, where the integer corresponds to the max number of
+# instances that can be actively performing IO on any given host.
+# (integer value)
+#max_io_ops_per_host = 8
+
+#
+# Maximum number of instances that can be active on a host.
+#
+# If you need to limit the number of instances on any given host, set
+# this option to the maximum number of instances you want to allow.
+# The num_instances_filter will reject any host that has at least as
+# many instances as this option's value.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect. Also
+# note that this setting only affects scheduling if the
+# 'num_instances_filter' filter is enabled.
+#
+# Possible values:
+#
+# * An integer, where the integer corresponds to the max instances
+# that can be scheduled on a host.
+# (integer value)
+# Minimum value: 1
+#max_instances_per_host = 50
+
+#
+# Enable querying of individual hosts for instance information.
+#
+# The scheduler may need information about the instances on a host in
+# order to evaluate its filters and weighers. The most common need
+# for this information is for the (anti-)affinity filters, which need
+# to choose a host based on the instances already running on a host.
+#
+# If the configured filters and weighers do not need this
+# information, disabling this option will improve performance. It may
+# also be disabled when the tracking overhead proves too heavy,
+# although this will cause classes requiring host usage data to query
+# the database on each request instead.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect.
+#
+# NOTE: In a multi-cell (v2) setup where the cell MQ is separated
+# from the top-level, computes cannot directly communicate with the
+# scheduler. Thus, this option cannot be enabled in that scenario.
+# See also the [workarounds]/disable_group_policy_check_upcall
+# option.
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/scheduler_tracks_instance_changes
+#track_instance_changes = true
+
+#
+# Filters that the scheduler can use.
+#
+# An unordered list of the filter classes the nova scheduler may
+# apply. Only the filters specified in the 'enabled_filters' option
+# will be used, but any filter appearing in that option must also be
+# included in this list.
+#
+# By default, this is set to all filters that are included with nova.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect.
+#
+# Possible values:
+#
+# * A list of zero or more strings, where each string corresponds to
+# the name of a filter that may be used for selecting a host
+#
+# Related options:
+#
+# * enabled_filters
+# (multi valued)
+# Deprecated group/name - [DEFAULT]/scheduler_available_filters
+#available_filters = nova.scheduler.filters.all_filters
+
+#
+# Filters that the scheduler will use.
+#
+# An ordered list of filter class names that will be used for
+# filtering hosts. These filters will be applied in the order they
+# are listed, so place your most restrictive filters first to make
+# the filtering process more efficient.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect.
+#
+# Possible values:
+#
+# * A list of zero or more strings, where each string corresponds to
+# the name of a filter to be used for selecting a host
+#
+# Related options:
+#
+# * All of the filters in this option *must* be present in the
+# 'scheduler_available_filters' option, or a
+# SchedulerHostFilterNotFound exception will be raised.
+# (list value)
+# Deprecated group/name - [DEFAULT]/scheduler_default_filters
+#enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
+
+# DEPRECATED:
+# Filters used for filtering baremetal hosts.
+#
+# Filters are applied in order, so place your most restrictive
+# filters first to make the filtering process more efficient.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect.
+#
+# Possible values:
+#
+# * A list of zero or more strings, where each string corresponds to
+# the name of a filter to be used for selecting a baremetal host
+#
+# Related options:
+#
+# * If the 'scheduler_use_baremetal_filters' option is False, this
+# option has no effect.
+# (list value)
+# Deprecated group/name - [DEFAULT]/baremetal_scheduler_default_filters
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason:
+# These filters were used to overcome some of the baremetal
+# scheduling limitations in Nova prior to the use of the Placement
+# API. Now scheduling will use the custom resource class defined for
+# each baremetal node to make its selection.
+#baremetal_enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ExactRamFilter,ExactDiskFilter,ExactCoreFilter
+
+# DEPRECATED:
+# Enable baremetal filters.
+#
+# Set this to True to tell the nova scheduler that it should use the
+# filters
+# specified in the 'baremetal_enabled_filters' option. If you are not
+# scheduling baremetal nodes, leave this at the default setting of
+# False.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use
+# a different scheduler, this option has no effect.
+#
+# Related options:
+#
+# * If this option is set to True, then the filters specified in the
+# 'baremetal_enabled_filters' are used instead of the filters
+# specified in 'enabled_filters'.
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/scheduler_use_baremetal_filters
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason:
+# These filters were used to overcome some of the baremetal scheduling
+# limitations in Nova prior to the use of the Placement API. Now
+# scheduling will
+# use the custom resource class defined for each baremetal node to
+# make its
+# selection.
+#use_baremetal_filters = false
+
+#
+# Weighers that the scheduler will use.
+#
+# Only hosts which pass the filters are weighed. The weight for any
+# host starts
+# at 0, and the weighers order these hosts by adding to or subtracting
+# from the
+# weight assigned by the previous weigher. Weights may become
+# negative. An
+# instance will be scheduled to one of the N most-weighted hosts,
+# where N is
+# 'scheduler_host_subset_size'.
+#
+# By default, this is set to all weighers that are included with Nova.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use
+# a different scheduler, this option has no effect.
+#
+# Possible values:
+#
+# * A list of zero or more strings, where each string corresponds to
+# the name of
+# a weigher that will be used for selecting a host
+# (list value)
+# Deprecated group/name - [DEFAULT]/scheduler_weight_classes
+#weight_classes = nova.scheduler.weights.all_weighers
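+# As an illustrative example only, a deployment that wants hosts
+# ranked purely by free RAM might restrict the list to the RAM
+# weigher shipped with nova:
+# weight_classes = nova.scheduler.weights.ram.RAMWeigher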
+
+#
+# RAM weight multiplier ratio.
+#
+# This option determines how hosts with more or less available RAM are
+# weighed. A
+# positive value will result in the scheduler preferring hosts with
+# more
+# available RAM, and a negative number will result in the scheduler
+# preferring
+# hosts with less available RAM. Another way to look at it is that
+# positive
+# values for this option will tend to spread instances across many
+# hosts, while
+# negative values will tend to fill up (stack) hosts as much as
+# possible before
+# scheduling to a less-used host. The absolute value, whether positive
+# or
+# negative, controls how strong the RAM weigher is relative to other
+# weighers.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use
+# a different scheduler, this option has no effect. Also note that
+# this setting
+# only affects scheduling if the 'ram' weigher is enabled.
+#
+# Possible values:
+#
+# * An integer or float value, where the value corresponds to the
+# multiplier
+# ratio for this weigher.
+# (floating point value)
+#ram_weight_multiplier = 1.0
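+# For example, to stack instances onto the fewest hosts instead of
+# spreading them, a negative value could be used (illustrative value
+# only):
+# ram_weight_multiplier = -1.0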
+
+#
+# Disk weight multiplier ratio.
+#
+# Multiplier used for weighing free disk space. Negative numbers mean
+# to
+# stack vs spread.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use
+# a different scheduler, this option has no effect. Also note that
+# this setting
+# only affects scheduling if the 'disk' weigher is enabled.
+#
+# Possible values:
+#
+# * An integer or float value, where the value corresponds to the
+# multiplier
+# ratio for this weigher.
+# (floating point value)
+#disk_weight_multiplier = 1.0
+
+#
+# IO operations weight multiplier ratio.
+#
+# This option determines how hosts with differing workloads are
+# weighed. Negative
+# values, such as the default, will result in the scheduler preferring
+# hosts with
+# lighter workloads whereas positive values will prefer hosts with
+# heavier
+# workloads. Another way to look at it is that positive values for
+# this option
+# will tend to schedule instances onto hosts that are already busy,
+# while
+# negative values will tend to distribute the workload across more
+# hosts. The
+# absolute value, whether positive or negative, controls how strong
+# the io_ops
+# weigher is relative to other weighers.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use
+# a different scheduler, this option has no effect. Also note that
+# this setting
+# only affects scheduling if the 'io_ops' weigher is enabled.
+#
+# Possible values:
+#
+# * An integer or float value, where the value corresponds to the
+# multiplier
+# ratio for this weigher.
+# (floating point value)
+#io_ops_weight_multiplier = -1.0
+
+#
+# PCI device affinity weight multiplier.
+#
+# The PCI device affinity weigher computes a weighting based on the
+# number of
+# PCI devices on the host and the number of PCI devices requested by
+# the
+# instance. The ``NUMATopologyFilter`` filter must be enabled for this
+# to have
+# any significance. For more information, refer to the filter
+# documentation:
+#
+# https://docs.openstack.org/nova/latest/user/filter-scheduler.html
+#
+# Possible values:
+#
+# * A positive integer or float value, where the value corresponds to
+# the
+# multiplier ratio for this weigher.
+# (floating point value)
+# Minimum value: 0
+#pci_weight_multiplier = 1.0
+
+#
+# Multiplier used for weighing hosts for group soft-affinity.
+#
+# Possible values:
+#
+# * An integer or float value, where the value corresponds to weight
+# multiplier
+# for hosts with group soft affinity. Only positive values are
+# meaningful, as
+# negative values would make this behave as a soft anti-affinity
+# weigher.
+# (floating point value)
+#soft_affinity_weight_multiplier = 1.0
+
+#
+# Multiplier used for weighing hosts for group soft-anti-affinity.
+#
+# Possible values:
+#
+# * An integer or float value, where the value corresponds to weight
+# multiplier
+# for hosts with group soft anti-affinity. Only positive values are
+# meaningful, as negative values would make this behave as a soft
+# affinity
+# weigher.
+# (floating point value)
+#soft_anti_affinity_weight_multiplier = 1.0
+
+#
+# Enable spreading the instances between hosts with the same best
+# weight.
+#
+# Enabling it is beneficial for cases when host_subset_size is 1
+# (default), but there is a large number of hosts with the same
+# maximal weight. This scenario is common in Ironic deployments,
+# where there are typically many baremetal nodes with identical
+# weights returned to the scheduler. In such a case, enabling this
+# option will reduce contention and the chances of rescheduling
+# events. At the same time, it will make the instance packing (even
+# in the unweighed case) less dense.
+# (boolean value)
+#shuffle_best_same_weighed_hosts = false
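+# For example, an Ironic deployment with many identically weighed
+# baremetal nodes might set (illustrative):
+# shuffle_best_same_weighed_hosts = true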
+
+#
+# The default architecture to be used when using the image properties
+# filter.
+#
+# When using the ImagePropertiesFilter, it is possible that you want
+# to define
+# a default architecture to make the user experience easier and avoid
+# having
+# something like x86_64 images landing on aarch64 compute nodes
+# because the
+# user did not specify the 'hw_architecture' property in Glance.
+#
+# Possible values:
+#
+# * CPU Architectures such as x86_64, aarch64, s390x.
+# (string value)
+# Possible values:
+# alpha - <No description provided>
+# armv6 - <No description provided>
+# armv7l - <No description provided>
+# armv7b - <No description provided>
+# aarch64 - <No description provided>
+# cris - <No description provided>
+# i686 - <No description provided>
+# ia64 - <No description provided>
+# lm32 - <No description provided>
+# m68k - <No description provided>
+# microblaze - <No description provided>
+# microblazeel - <No description provided>
+# mips - <No description provided>
+# mipsel - <No description provided>
+# mips64 - <No description provided>
+# mips64el - <No description provided>
+# openrisc - <No description provided>
+# parisc - <No description provided>
+# parisc64 - <No description provided>
+# ppc - <No description provided>
+# ppcle - <No description provided>
+# ppc64 - <No description provided>
+# ppc64le - <No description provided>
+# ppcemb - <No description provided>
+# s390 - <No description provided>
+# s390x - <No description provided>
+# sh4 - <No description provided>
+# sh4eb - <No description provided>
+# sparc - <No description provided>
+# sparc64 - <No description provided>
+# unicore32 - <No description provided>
+# x86_64 - <No description provided>
+# xtensa - <No description provided>
+# xtensaeb - <No description provided>
+#image_properties_default_architecture = <None>
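+# For example, a predominantly x86_64 cloud might set (illustrative):
+# image_properties_default_architecture = x86_64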
+
+#
+# List of UUIDs for images that can only be run on certain hosts.
+#
+# If there is a need to restrict some images to only run on certain
+# designated
+# hosts, list those image UUIDs here.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use
+# a different scheduler, this option has no effect. Also note that
+# this setting
+# only affects scheduling if the 'IsolatedHostsFilter' filter is
+# enabled.
+#
+# Possible values:
+#
+# * A list of UUID strings, where each string corresponds to the UUID
+# of an
+# image
+#
+# Related options:
+#
+# * scheduler/isolated_hosts
+# * scheduler/restrict_isolated_hosts_to_isolated_images
+# (list value)
+#isolated_images =
+
+#
+# List of hosts that can only run certain images.
+#
+# If there is a need to restrict some images to only run on certain
+# designated
+# hosts, list those host names here.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use
+# a different scheduler, this option has no effect. Also note that
+# this setting
+# only affects scheduling if the 'IsolatedHostsFilter' filter is
+# enabled.
+#
+# Possible values:
+#
+# * A list of strings, where each string corresponds to the name of a
+# host
+#
+# Related options:
+#
+# * scheduler/isolated_images
+# * scheduler/restrict_isolated_hosts_to_isolated_images
+# (list value)
+#isolated_hosts =
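+# Illustrative example of pairing the two lists (placeholder UUID and
+# hostname, not from this deployment):
+# isolated_images = 11111111-2222-3333-4444-555555555555
+# isolated_hosts = compute-isolated-01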
+
+#
+# Prevent non-isolated images from being built on isolated hosts.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use
+# a different scheduler, this option has no effect. Also note that
+# this setting
+# only affects scheduling if the 'IsolatedHostsFilter' filter is
+# enabled. Even
+# then, this option doesn't affect the behavior of requests for
+# isolated images,
+# which will *always* be restricted to isolated hosts.
+#
+# Related options:
+#
+# * scheduler/isolated_images
+# * scheduler/isolated_hosts
+# (boolean value)
+#restrict_isolated_hosts_to_isolated_images = true
+
+#
+# Image property namespace for use in the host aggregate.
+#
+# Images and hosts can be configured so that certain images can only
+# be scheduled
+# to hosts in a particular aggregate. This is done with metadata
+# values set on
+# the host aggregate that are identified by beginning with the value
+# of this
+# option. If the host is part of an aggregate with such a metadata
+# key, the image
+# in the request spec must have the value of that metadata in its
+# properties in
+# order for the scheduler to consider the host as acceptable.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use
+# a different scheduler, this option has no effect. Also note that
+# this setting
+# only affects scheduling if the
+# 'aggregate_image_properties_isolation' filter is
+# enabled.
+#
+# Possible values:
+#
+# * A string, where the string corresponds to an image property
+# namespace
+#
+# Related options:
+#
+# * aggregate_image_properties_isolation_separator
+# (string value)
+#aggregate_image_properties_isolation_namespace = <None>
+
+#
+# Separator character(s) for image property namespace and name.
+#
+# When using the aggregate_image_properties_isolation filter, the
+# relevant
+# metadata keys are prefixed with the namespace defined in the
+# aggregate_image_properties_isolation_namespace configuration option
+# plus a
+# separator. This option defines the separator to be used.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use
+# a different scheduler, this option has no effect. Also note that
+# this setting
+# only affects scheduling if the
+# 'aggregate_image_properties_isolation' filter
+# is enabled.
+#
+# Possible values:
+#
+# * A string, where the string corresponds to an image property
+# namespace
+# separator character
+#
+# Related options:
+#
+# * aggregate_image_properties_isolation_namespace
+# (string value)
+#aggregate_image_properties_isolation_separator = .
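+# Worked example, assuming (illustratively) a namespace of
+# 'isolation' and the default '.' separator: an aggregate with
+# metadata 'isolation.os_distro=ubuntu' would only accept images
+# whose 'os_distro' property equals 'ubuntu'.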
+
+
+[glance]
+# Configuration options for the Image service
+
+#
+# From nova.conf
+#
+
+#
+# List of glance api server endpoints available to nova.
+#
+# https is used for ssl-based glance api servers.
+#
+# NOTE: The preferred mechanism for endpoint discovery is via
+# keystoneauth1
+# loading options. Only use api_servers if you need multiple endpoints
+# and are
+# unable to use a load balancer for some reason.
+#
+# Possible values:
+#
+# * A list of any fully qualified url of the form
+# "scheme://hostname:port[/path]"
+# (i.e. "http://10.0.1.0:9292" or "https://my.glance.server/image").
+# (list value)
+#api_servers = <None>
+{%- if compute.image is defined %}
+api_servers = {{ compute.image.get('protocol', 'http') }}://{{ compute.image.host }}:{{ compute.image.get('port', 9292) }}
+{% endif %}
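+# For reference, with hypothetical pillar data such as
+# compute:image:{host: 10.0.0.10}, the template above would render:
+# api_servers = http://10.0.0.10:9292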
+
+#
+# Enable glance operation retries.
+#
+# Specifies the number of retries when uploading / downloading
+# an image to / from glance. 0 means no retries.
+# (integer value)
+# Minimum value: 0
+#num_retries = 0
+
+# DEPRECATED:
+# List of url schemes that can be directly accessed.
+#
+# This option specifies a list of URL schemes from which images can
+# be downloaded directly via the direct_url. The direct_url is
+# fetched from the image metadata and can be used by nova to get the
+# image more efficiently. nova-compute could benefit from this by
+# invoking a copy when it has access to the same file system as
+# glance.
+#
+# Possible values:
+#
+# * [file], Empty list (default)
+# (list value)
+# This option is deprecated for removal since 17.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# This was originally added for the 'nova.image.download.file'
+# FileTransfer
+# extension which was removed in the 16.0.0 Pike release. The
+# 'nova.image.download.modules' extension point is not maintained
+# and there is no indication of its use in production clouds.
+#allowed_direct_url_schemes =
+
+#
+# Enable image signature verification.
+#
+# nova uses the image signature metadata from glance and verifies the
+# signature
+# of a signed image while downloading that image. If the image
+# signature cannot
+# be verified or if the image signature metadata is either incomplete
+# or
+# unavailable, then nova will not boot the image and instead will
+# place the
+# instance into an error state. This provides end users with stronger
+# assurances
+# of the integrity of the image data they are using to create servers.
+#
+# Related options:
+#
+# * The options in the `key_manager` group, as the key_manager is used
+# for the signature validation.
+# * Both enable_certificate_validation and
+# default_trusted_certificate_ids
+# below depend on this option being enabled.
+# (boolean value)
+{%- if compute.get('barbican', {}).get('enabled', False) %}
+verify_glance_signatures=true
+{%- else %}
+#verify_glance_signatures=false
+{%- endif %}
+
+# DEPRECATED:
+# Enable certificate validation for image signature verification.
+#
+# During image signature verification nova will first verify the
+# validity of the
+# image's signing certificate using the set of trusted certificates
+# associated
+# with the instance. If certificate validation fails, signature
+# verification
+# will not be performed and the image will be placed into an error
+# state. This
+# provides end users with stronger assurances that the image data is
+# unmodified
+# and trustworthy. If left disabled, image signature verification can
+# still
+# occur but the end user will not have any assurance that the signing
+# certificate used to generate the image signature is still
+# trustworthy.
+#
+# Related options:
+#
+# * This option only takes effect if verify_glance_signatures is
+# enabled.
+# * The value of default_trusted_certificate_ids may be used when this
+# option
+# is enabled.
+# (boolean value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# This option is intended to ease the transition for deployments
+# leveraging
+# image signature verification. The intended state long-term is for
+# signature
+# verification and certificate validation to always happen together.
+#enable_certificate_validation = false
+
+#
+# List of certificate IDs for certificates that should be trusted.
+#
+# May be used as a default list of trusted certificate IDs for
+# certificate
+# validation. The value of this option will be ignored if the user
+# provides a
+# list of trusted certificate IDs with an instance API request. The
+# value of
+# this option will be persisted with the instance data if signature
+# verification
+# and certificate validation are enabled and if the user did not
+# provide an
+# alternative list. If left empty when certificate validation is
+# enabled, the user must provide a list of trusted certificate IDs,
+# otherwise certificate validation will fail.
+#
+# Related options:
+#
+# * The value of this option may be used if both
+# verify_glance_signatures and
+# enable_certificate_validation are enabled.
+# (list value)
+#default_trusted_certificate_ids =
+
+# Enable or disable debug logging with glanceclient. (boolean value)
+#debug = false
+
+# PEM encoded Certificate Authority to use when verifying HTTPs
+# connections. (string value)
+#cafile = <None>
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Timeout value for http requests (integer value)
+#timeout = <None>
+
+# The default service_type for endpoint URL discovery. (string value)
+#service_type = image
+
+# The default service_name for endpoint URL discovery. (string value)
+#service_name = <None>
+
+# List of interfaces, in order of preference, for endpoint URL. (list
+# value)
+#valid_interfaces = internal,public
+
+# The default region_name for endpoint URL discovery. (string value)
+#region_name = <None>
+
+# Always use this endpoint URL for requests for this client. NOTE: The
+# unversioned endpoint should be specified here; to request a
+# particular API version, use the `version`, `min-version`, and/or
+# `max-version` options. (string value)
+#endpoint_override = <None>
+
+
+[guestfs]
+#
+# libguestfs is a set of tools for accessing and modifying virtual
+# machine (VM) disk images. You can use this for viewing and editing
+# files inside guests, scripting changes to VMs, monitoring disk
+# used/free statistics, creating guests, P2V, V2V, performing backups,
+# cloning VMs, building VMs, formatting disks and resizing disks.
+
+#
+# From nova.conf
+#
+
+#
+# Enables/disables guestfs logging.
+#
+# This configures guestfs to emit debug messages and push them to the
+# OpenStack logging system. When set to True, it traces libguestfs
+# API calls and enables verbose debug messages. In order to use this
+# feature, the "libguestfs" package must be installed.
+#
+# Related options:
+# Since libguestfs accesses and modifies VMs managed by libvirt, the
+# options below should be set to grant access to those VMs.
+# * libvirt.inject_key
+# * libvirt.inject_partition
+# * libvirt.inject_password
+# (boolean value)
+#debug = false
+
+
+[hyperv]
+#
+# The hyperv feature allows you to configure the Hyper-V hypervisor
+# driver to be used within an OpenStack deployment.
+
+#
+# From nova.conf
+#
+
+#
+# Dynamic memory ratio
+#
+# Enables dynamic memory allocation (ballooning) when set to a value
+# greater than 1. The value expresses the ratio between the total RAM
+# assigned to an instance and its startup RAM amount. For example a
+# ratio of 2.0 for an instance with 1024MB of RAM implies 512MB of
+# RAM allocated at startup.
+#
+# Possible values:
+#
+# * 1.0: Disables dynamic memory allocation (Default).
+# * Float values greater than 1.0: Enables allocation of total implied
+# RAM divided by this value for startup.
+# (floating point value)
+#dynamic_memory_ratio = 1.0
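+# Worked example: with dynamic_memory_ratio = 4.0, an instance with
+# 2048MB of RAM would be allocated 2048 / 4.0 = 512MB at startup
+# (illustrative values).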
+
+#
+# Enable instance metrics collection
+#
+# Enables metrics collections for an instance by using Hyper-V's
+# metric APIs. Collected data can be retrieved by other apps and
+# services, e.g.: Ceilometer.
+# (boolean value)
+#enable_instance_metrics_collection = false
+
+#
+# Instances path share
+#
+# The name of a Windows share mapped to the "instances_path" dir
+# and used by the resize feature to copy files to the target host.
+# If left blank, an administrative share (hidden network share) will
+# be used, looking for the same "instances_path" used locally.
+#
+# Possible values:
+#
+# * "": An administrative share will be used (Default).
+# * Name of a Windows share.
+#
+# Related options:
+#
+# * "instances_path": The directory which will be used if this option
+# here is left blank.
+# (string value)
+#instances_path_share =
+
+#
+# Limit CPU features
+#
+# This flag is needed to support live migration to hosts with
+# different CPU features and checked during instance creation
+# in order to limit the CPU features used by the instance.
+# (boolean value)
+#limit_cpu_features = false
+
+#
+# Mounted disk query retry count
+#
+# The number of times to retry checking for a mounted disk.
+# The query runs until the device can be found or the retry
+# count is reached.
+#
+# Possible values:
+#
+# * Positive integer values. Values greater than 1 are recommended
+# (Default: 10).
+#
+# Related options:
+#
+# * Time interval between disk mount retries is declared with
+# "mounted_disk_query_retry_interval" option.
+# (integer value)
+# Minimum value: 0
+#mounted_disk_query_retry_count = 10
+
+#
+# Mounted disk query retry interval
+#
+# Interval between checks for a mounted disk, in seconds.
+#
+# Possible values:
+#
+# * Time in seconds (Default: 5).
+#
+# Related options:
+#
+# * This option is meaningful when the mounted_disk_query_retry_count
+# is greater than 1.
+# * The retry loop runs with mounted_disk_query_retry_count and
+# mounted_disk_query_retry_interval configuration options.
+# (integer value)
+# Minimum value: 0
+#mounted_disk_query_retry_interval = 5
+
+#
+# Power state check timeframe
+#
+# The timeframe to be checked for instance power state changes.
+# This option is used to fetch the state of the instance from Hyper-V
+# through the WMI interface, within the specified timeframe.
+#
+# Possible values:
+#
+# * Timeframe in seconds (Default: 60).
+# (integer value)
+# Minimum value: 0
+#power_state_check_timeframe = 60
+
+#
+# Power state event polling interval
+#
+# Instance power state change event polling frequency. Sets the
+# listener interval for power state events to the given value.
+# This option enhances the internal lifecycle notifications of
+# instances that reboot themselves. It is unlikely that an operator
+# has to change this value.
+#
+# Possible values:
+#
+# * Time in seconds (Default: 2).
+# (integer value)
+# Minimum value: 0
+#power_state_event_polling_interval = 2
+
+#
+# qemu-img command
+#
+# qemu-img is required for some of the image related operations
+# like converting between different image types. You can get it
+# from here: (http://qemu.weilnetz.de/) or you can install the
+# Cloudbase OpenStack Hyper-V Compute Driver
+# (https://cloudbase.it/openstack-hyperv-driver/) which automatically
+# sets the proper path for this config option. You can either give the
+# full path of qemu-img.exe or set its path in the PATH environment
+# variable and leave this option to the default value.
+#
+# Possible values:
+#
+# * Name of the qemu-img executable, in case it is in the same
+# directory as the nova-compute service or its path is in the
+# PATH environment variable (Default).
+# * Path of qemu-img command (DRIVELETTER:\PATH\TO\QEMU-IMG\COMMAND).
+#
+# Related options:
+#
+# * If the config_drive_cdrom option is False, qemu-img will be used
+# to
+# convert the ISO to a VHD, otherwise the configuration drive will
+# remain an ISO. To use configuration drive with Hyper-V, you must
+# set the mkisofs_cmd value to the full path to an mkisofs.exe
+# installation.
+# (string value)
+#qemu_img_cmd = qemu-img.exe
+
+#
+# External virtual switch name
+#
+# The Hyper-V Virtual Switch is a software-based layer-2 Ethernet
+# network switch that is available with the installation of the
+# Hyper-V server role. The switch includes programmatically managed
+# and extensible capabilities to connect virtual machines to both
+# virtual networks and the physical network. In addition, Hyper-V
+# Virtual Switch provides policy enforcement for security, isolation,
+# and service levels. The vSwitch represented by this config option
+# must be an external one (not internal or private).
+#
+# Possible values:
+#
+# * If not provided, the first of a list of available vswitches
+# is used. This list is queried using WQL.
+# * Virtual switch name.
+# (string value)
+#vswitch_name = <None>
+
+#
+# Wait soft reboot seconds
+#
+# Number of seconds to wait for instance to shut down after soft
+# reboot request is made. We fall back to hard reboot if instance
+# does not shutdown within this window.
+#
+# Possible values:
+#
+# * Time in seconds (Default: 60).
+# (integer value)
+# Minimum value: 0
+#wait_soft_reboot_seconds = 60
+
+#
+# Configuration drive cdrom
+#
+# OpenStack can be configured to write instance metadata to
+# a configuration drive, which is then attached to the
+# instance before it boots. The configuration drive can be
+# attached as a disk drive (default) or as a CD drive.
+#
+# Possible values:
+#
+# * True: Attach the configuration drive image as a CD drive.
+# * False: Attach the configuration drive image as a disk drive
+# (Default).
+#
+# Related options:
+#
+# * This option is meaningful with the force_config_drive option set
+# to 'True', or when the REST API call to create an instance
+# includes the '--config-drive=True' flag.
+# * config_drive_format option must be set to 'iso9660' in order to
+# use
+# CD drive as the configuration drive image.
+# * To use configuration drive with Hyper-V, you must set the
+# mkisofs_cmd value to the full path to an mkisofs.exe installation.
+# Additionally, you must set the qemu_img_cmd value to the full path
+# to a qemu-img command installation.
+# * You can configure the Compute service to always create a
+# configuration
+# drive by setting the force_config_drive option to 'True'.
+# (boolean value)
+#config_drive_cdrom = false
+config_drive_cdrom = {{ compute.get('config_drive', {}).get('cdrom', False)|lower }}
+
+#
+# Configuration drive inject password
+#
+# Enables setting the admin password in the configuration drive image.
+#
+# Related options:
+#
+# * This option is meaningful when used with other options that enable
+# configuration drive usage with Hyper-V, such as
+# force_config_drive.
+# * Currently, the only accepted config_drive_format is 'iso9660'.
+# (boolean value)
+#config_drive_inject_password = false
+config_drive_inject_password = {{ compute.get('config_drive', {}).get('inject_password', False)|lower }}
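+# For reference, with hypothetical pillar data such as
+# compute:config_drive:{cdrom: true, inject_password: true}, the two
+# template lines above would render as:
+# config_drive_cdrom = true
+# config_drive_inject_password = true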
+
+#
+# Volume attach retry count
+#
+# The number of times to retry attaching a volume. Volume attachment
+# is retried until success or the given retry count is reached.
+#
+# Possible values:
+#
+# * Positive integer values (Default: 10).
+#
+# Related options:
+#
+# * Time interval between attachment attempts is declared with
+# volume_attach_retry_interval option.
+# (integer value)
+# Minimum value: 0
+#volume_attach_retry_count = 10
+
+#
+# Volume attach retry interval
+#
+# Interval between volume attachment attempts, in seconds.
+#
+# Possible values:
+#
+# * Time in seconds (Default: 5).
+#
+# Related options:
+#
+# * This option is meaningful when volume_attach_retry_count
+# is greater than 1.
+# * The retry loop runs with volume_attach_retry_count and
+# volume_attach_retry_interval configuration options.
+# (integer value)
+# Minimum value: 0
+#volume_attach_retry_interval = 5
+
+#
+# Enable RemoteFX feature
+#
+# This requires at least one DirectX 11 capable graphics adapter for
+# Windows / Hyper-V Server 2012 R2 or newer, and the
+# RDS-Virtualization feature has to be enabled.
+#
+# Instances with RemoteFX can be requested with the following flavor
+# extra specs:
+#
+# **os:resolution**. Guest VM screen resolution size. Acceptable
+# values::
+#
+# 1024x768, 1280x1024, 1600x1200, 1920x1200, 2560x1600, 3840x2160
+#
+# ``3840x2160`` is only available on Windows / Hyper-V Server 2016.
+#
+# **os:monitors**. Guest VM number of monitors. Acceptable values::
+#
+# [1, 4] - Windows / Hyper-V Server 2012 R2
+# [1, 8] - Windows / Hyper-V Server 2016
+#
+# **os:vram**. Guest VM VRAM amount. Only available on
+# Windows / Hyper-V Server 2016. Acceptable values::
+#
+# 64, 128, 256, 512, 1024
+# (boolean value)
+#enable_remotefx = false
+
+#
+# Use multipath connections when attaching iSCSI or FC disks.
+#
+# This requires the Multipath IO Windows feature to be enabled. MPIO
+# must be
+# configured to claim such devices.
+# (boolean value)
+#use_multipath_io = false
+
+#
+# List of iSCSI initiators that will be used for establishing iSCSI
+# sessions.
+#
+# If none are specified, the Microsoft iSCSI initiator service will
+# choose the
+# initiator.
+# (list value)
+#iscsi_initiator_list =
+
+{% if compute.ironic is defined -%}
+[ironic]
+#
+# Configuration options for Ironic driver (Bare Metal).
+# If using the Ironic driver, the following options must be set:
+# * auth_type
+# * auth_url
+# * project_name
+# * username
+# * password
+# * project_domain_id or project_domain_name
+# * user_domain_id or user_domain_name
+
+#
+# From nova.conf
+#
+
+# DEPRECATED: URL override for the Ironic API endpoint. (uri value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Endpoint lookup uses the service catalog via common
+# keystoneauth1 Adapter configuration options. In the current release,
+# api_endpoint will override this behavior, but will be ignored and/or
+# removed in a future release. To achieve the same result, use the
+# endpoint_override option instead.
+#api_endpoint = http://ironic.example.org:6385/
+api_endpoint={{ compute.ironic.get('protocol', 'http') }}://{{ compute.ironic.host }}:{{ compute.ironic.port }}
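+# For reference, with hypothetical pillar data such as
+# compute:ironic:{protocol: https, host: ironic.local, port: 6385},
+# the template line above would render as:
+# api_endpoint=https://ironic.local:6385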
+
+#
+# The number of times to retry when a request conflicts.
+# If set to 0, only try once, no retries.
+#
+# Related options:
+#
+# * api_retry_interval
+# (integer value)
+# Minimum value: 0
+#api_max_retries = 60
+
+#
+# The number of seconds to wait before retrying the request.
+#
+# Related options:
+#
+# * api_max_retries
+# (integer value)
+# Minimum value: 0
+#api_retry_interval = 2
+
+# Timeout (seconds) to wait for node serial console state changed. Set
+# to 0 to disable timeout. (integer value)
+# Minimum value: 0
+#serial_console_state_timeout = 10
+
+# PEM encoded Certificate Authority to use when verifying HTTPs
+# connections. (string value)
+#cafile = <None>
+{%- if compute.ironic.get('protocol', 'http') == 'https' %}
+cafile={{ compute.identity.get('cacert_file', compute.cacert_file) }}
+{%- endif %}
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Timeout value for http requests (integer value)
+#timeout = <None>
+
+# Authentication type to load (string value)
+# Deprecated group/name - [ironic]/auth_plugin
+#auth_type = <None>
+auth_type={{ compute.ironic.auth_type }}
+
+# Config Section from which to load plugin specific options (string
+# value)
+#auth_section = <None>
+
+# Authentication URL (string value)
+#auth_url = <None>
+auth_url={{ compute.identity.get('protocol', 'http') }}://{{ compute.identity.host }}:{{ compute.identity.port }}/v3
+
+# Scope for system operations (string value)
+#system_scope = <None>
+
+# Domain ID to scope to (string value)
+#domain_id = <None>
+
+# Domain name to scope to (string value)
+#domain_name = <None>
+
+# Project ID to scope to (string value)
+#project_id = <None>
+
+# Project name to scope to (string value)
+#project_name = <None>
+project_name={{ compute.identity.tenant }}
+
+# Domain ID containing project (string value)
+#project_domain_id = <None>
+
+# Domain name containing project (string value)
+#project_domain_name = <None>
+project_domain_name={{ compute.ironic.project_domain_name }}
+
+# Trust ID (string value)
+#trust_id = <None>
+
+# User ID (string value)
+#user_id = <None>
+
+# Username (string value)
+# Deprecated group/name - [ironic]/user_name
+#username = <None>
+username={{ compute.ironic.user }}
+
+# User's domain id (string value)
+#user_domain_id = <None>
+
+# User's domain name (string value)
+#user_domain_name = <None>
+user_domain_name={{ compute.ironic.user_domain_name }}
+
+# User's password (string value)
+#password = <None>
+password={{ compute.ironic.password }}
+
+# The default service_type for endpoint URL discovery. (string value)
+#service_type = baremetal
+
+# The default service_name for endpoint URL discovery. (string value)
+#service_name = <None>
+
+# List of interfaces, in order of preference, for endpoint URL. (list
+# value)
+#valid_interfaces = internal,public
+
+# The default region_name for endpoint URL discovery. (string value)
+#region_name = <None>
+
+# Always use this endpoint URL for requests for this client. NOTE: The
+# unversioned endpoint should be specified here; to request a
+# particular API version, use the `version`, `min-version`, and/or
+# `max-version` options. (string value)
+# Deprecated group/name - [ironic]/api_endpoint
+#endpoint_override = <None>
+{%- endif %}
+
+
+[key_manager]
+
+#
+# From nova.conf
+#
+
+#
+# Fixed key returned by key manager, specified in hex.
+#
+# Possible values:
+#
+# * Empty string or a key in hex value
+# (string value)
+#fixed_key = <None>
+{%- if compute.get('barbican', {}).get('enabled', False) %}
+api_class=castellan.key_manager.barbican_key_manager.BarbicanKeyManager
+{%- endif %}
+
+# Specify the key manager implementation. Options are "barbican" and
+# "vault". Default is "barbican". Will support the values earlier
+# set using [key_manager]/api_class for some time. (string value)
+# Deprecated group/name - [key_manager]/api_class
+#backend = barbican
+
+# The type of authentication credential to create. Possible values are
+# 'token', 'password', 'keystone_token', and 'keystone_password'.
+# Required if no context is passed to the credential factory. (string
+# value)
+#auth_type = <None>
+
+# Token for authentication. Required for 'token' and 'keystone_token'
+# auth_type if no context is passed to the credential factory. (string
+# value)
+#token = <None>
+
+# Username for authentication. Required for 'password' auth_type.
+# Optional for the 'keystone_password' auth_type. (string value)
+#username = <None>
+
+# Password for authentication. Required for 'password' and
+# 'keystone_password' auth_type. (string value)
+#password = <None>
+
+# Use this endpoint to connect to Keystone. (string value)
+#auth_url = <None>
+
+# User ID for authentication. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#user_id = <None>
+
+# User's domain ID for authentication. Optional for 'keystone_token'
+# and 'keystone_password' auth_type. (string value)
+#user_domain_id = <None>
+
+# User's domain name for authentication. Optional for 'keystone_token'
+# and 'keystone_password' auth_type. (string value)
+#user_domain_name = <None>
+
+# Trust ID for trust scoping. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#trust_id = <None>
+
+# Domain ID for domain scoping. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#domain_id = <None>
+
+# Domain name for domain scoping. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#domain_name = <None>
+
+# Project ID for project scoping. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#project_id = <None>
+
+# Project name for project scoping. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#project_name = <None>
+
+# Project's domain ID for project. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#project_domain_id = <None>
+
+# Project's domain name for project. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#project_domain_name = <None>
+
+# Allow fetching a new token if the current one is going to expire.
+# Optional for 'keystone_token' and 'keystone_password' auth_type.
+# (boolean value)
+#reauthenticate = true
+
+
+[keystone]
+# Configuration options for the identity service
+
+#
+# From nova.conf
+#
+
+# PEM encoded Certificate Authority to use when verifying HTTPs
+# connections. (string value)
+#cafile = <None>
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Timeout value for http requests (integer value)
+#timeout = <None>
+
+# The default service_type for endpoint URL discovery. (string value)
+#service_type = identity
+
+# The default service_name for endpoint URL discovery. (string value)
+#service_name = <None>
+
+# List of interfaces, in order of preference, for endpoint URL. (list
+# value)
+#valid_interfaces = internal,public
+
+# The default region_name for endpoint URL discovery. (string value)
+#region_name = <None>
+
+# Always use this endpoint URL for requests for this client. NOTE: The
+# unversioned endpoint should be specified here; to request a
+# particular API version, use the `version`, `min-version`, and/or
+# `max-version` options. (string value)
+#endpoint_override = <None>
+
+
+[libvirt]
+#
+# Libvirt options allow the cloud administrator to configure the
+# libvirt hypervisor driver to be used within an OpenStack deployment.
+#
+# Almost all of the libvirt config options are influenced by the
+# ``virt_type`` config option, which describes the virtualization
+# type (or so-called domain type) libvirt should use for specific
+# features such as live migration and snapshots.
+
+#
+# From nova.conf
+#
+cpu_mode = {{ compute.cpu_mode }}
+{%- if compute.libvirt.virt_type is defined %}
+virt_type = {{ compute.libvirt.virt_type }}
+{%- else %}
+virt_type = kvm
+{%- endif%}
+
+inject_partition={{ compute.libvirt.inject_partition }}
+{%- if compute.libvirt.get('inject_partition', '-2')|string == '-2' %}
+inject_password=False
+{%- else %}
+inject_password={{ compute.libvirt.inject_password }}
+{%- endif %}
+
+disk_cachemodes="{{ compute.get('disk_cachemodes', 'network=writeback,block=none') }}"
+block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_NON_SHARED_INC
+live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST
+inject_key=True
+vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
+
+{%- if compute.get('ceph', {}).get('ephemeral', False) %}
+images_type=rbd
+images_rbd_pool={{ compute.ceph.rbd_pool }}
+images_rbd_ceph_conf=/etc/ceph/ceph.conf
+rbd_user={{ compute.ceph.rbd_user }}
+rbd_secret_uuid={{ compute.ceph.secret_uuid }}
+inject_password=false
+inject_key=false
+{%- elif compute.get('lvm', {}).get('ephemeral', False) %}
+images_type=lvm
+images_volume_group={{ compute.lvm.images_volume_group }}
+{%- if compute.lvm.volume_clear is defined %}
+volume_clear={{ compute.lvm.volume_clear }}
+{%- endif %}
+{%- if compute.lvm.volume_clear_size is defined %}
+volume_clear_size={{ compute.lvm.volume_clear_size }}
+{%- endif %}
+{%- endif %}
+
+{%- if compute.get('libvirt', {}).uri is defined %}
+connection_uri={{ compute.libvirt.uri }}
+{%- endif %}
+
+#
+# The ID of the image to boot from to rescue data from a corrupted
+# instance.
+#
+# If the rescue REST API operation doesn't provide an ID of an image
+# to
+# use, the image which is referenced by this ID is used. If this
+# option is not set, the image from the instance is used.
+#
+# Possible values:
+#
+# * An ID of an image or nothing. If it points to an *Amazon Machine
+# Image* (AMI), consider setting the config options
+# ``rescue_kernel_id`` and ``rescue_ramdisk_id`` too. If nothing is
+# set, the image of the instance is used.
+#
+# Related options:
+#
+# * ``rescue_kernel_id``: If the chosen rescue image allows the
+# separate
+# definition of its kernel disk, the value of this option is used,
+# if specified. This is the case when *Amazon*'s AMI/AKI/ARI image
+# format is used for the rescue image.
+# * ``rescue_ramdisk_id``: If the chosen rescue image allows the
+# separate
+# definition of its RAM disk, the value of this option is used, if
+# specified. This is the case when *Amazon*'s AMI/AKI/ARI image
+# format is used for the rescue image.
+# (string value)
+#rescue_image_id = <None>
+
+#
+# The ID of the kernel (AKI) image to use with the rescue image.
+#
+# If the chosen rescue image allows the separate definition of its
+# kernel
+# disk, the value of this option is used, if specified. This is the
+# case
+# when *Amazon*'s AMI/AKI/ARI image format is used for the rescue
+# image.
+#
+# Possible values:
+#
+# * An ID of a kernel image or nothing. If nothing is specified, the
+# kernel
+# disk from the instance is used if it was launched with one.
+#
+# Related options:
+#
+# * ``rescue_image_id``: If that option points to an image in
+# *Amazon*'s
+# AMI/AKI/ARI image format, it's useful to use ``rescue_kernel_id``
+# too.
+# (string value)
+#rescue_kernel_id = <None>
+
+#
+# The ID of the RAM disk (ARI) image to use with the rescue image.
+#
+# If the chosen rescue image allows the separate definition of its RAM
+# disk, the value of this option is used, if specified. This is the
+# case
+# when *Amazon*'s AMI/AKI/ARI image format is used for the rescue
+# image.
+#
+# Possible values:
+#
+# * An ID of a RAM disk image or nothing. If nothing is specified, the
+# RAM
+# disk from the instance is used if it was launched with one.
+#
+# Related options:
+#
+# * ``rescue_image_id``: If that option points to an image in
+# *Amazon*'s
+# AMI/AKI/ARI image format, it's useful to use ``rescue_ramdisk_id``
+# too.
+# (string value)
+#rescue_ramdisk_id = <None>
+
+#
+# Describes the virtualization type (or so called domain type) libvirt
+# should
+# use.
+#
+# The choice of this type must match the underlying virtualization
+# strategy
+# you have chosen for this host.
+#
+# Possible values:
+#
+# * See the predefined set of case-sensitive values.
+#
+# Related options:
+#
+# * ``connection_uri``: depends on this
+# * ``disk_prefix``: depends on this
+# * ``cpu_mode``: depends on this
+# * ``cpu_model``: depends on this
+# (string value)
+# Possible values:
+# kvm - <No description provided>
+# lxc - <No description provided>
+# qemu - <No description provided>
+# uml - <No description provided>
+# xen - <No description provided>
+# parallels - <No description provided>
+#virt_type = kvm
+
+#
+# Overrides the default libvirt URI of the chosen virtualization type.
+#
+# If set, Nova will use this URI to connect to libvirt.
+#
+# Possible values:
+#
+# * A URI like ``qemu:///system`` or ``xen+ssh://oirase/``, for
+# example. This is only necessary if the URI differs from the
+# commonly known URIs for the chosen virtualization type.
+#
+# Related options:
+#
+# * ``virt_type``: Influences what is used as default value here.
+# (string value)
+#connection_uri =
+
+#
+# Algorithm used to hash the injected password.
+# Note that it must be supported by libc on the compute host
+# _and_ by libc inside *any guest image* that will be booted by this
+# compute host with requested password injection.
+# In case the specified algorithm is not supported by libc on the
+# compute host, a fallback to the DES algorithm will be performed.
+#
+# Related options:
+#
+# * ``inject_password``
+# * ``inject_partition``
+# (string value)
+# Possible values:
+# SHA-512 - <No description provided>
+# SHA-256 - <No description provided>
+# MD5 - <No description provided>
+#inject_password_algorithm = MD5
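+# For example, to prefer a stronger hash than the MD5 default, a
+# deployment whose guest images support it might set (illustrative):
+# inject_password_algorithm = SHA-512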
+
+#
+# Allow the injection of an admin password for an instance only
+# during the ``create`` and ``rebuild`` process.
+#
+# There is no agent needed within the image to do this. If
+# *libguestfs* is available on the host, it will be used. Otherwise
+# *nbd* is used. The file system of the image will be mounted and the
+# admin password, which is provided in the REST API call, will be
+# injected as the password for the root user. If no root user is
+# available, the instance won't be launched and an error is thrown.
+# Be aware that the injection is *not* possible when the instance
+# gets launched from a volume.
+#
+# Possible values:
+#
+# * True: Allows the injection.
+# * False (default): Disallows the injection. Any admin password
+# provided via the REST API will be silently ignored.
+#
+# Related options:
+#
+# * ``inject_partition``: That option decides how the file system is
+# discovered and used. It can also disable injection entirely.
+# (boolean value)
+#inject_password = false
+
+#
+# Allow the injection of an SSH key at boot time.
+#
+# There is no agent needed within the image to do this. If
+# *libguestfs* is available on the host, it will be used. Otherwise
+# *nbd* is used. The file system of the image will be mounted and the
+# SSH key, which is provided in the REST API call, will be injected
+# as the SSH key for the root user and appended to the
+# ``authorized_keys`` of that user. The SELinux context will be set
+# if necessary. Be aware that the injection is *not* possible when
+# the instance gets launched from a volume.
+#
+# This config option will enable directly modifying the instance disk
+# and does
+# not affect what cloud-init may do using data from config_drive
+# option or the
+# metadata service.
+#
+# Related options:
+#
+# * ``inject_partition``: That option decides how the file system is
+# discovered and used. It can also disable injection entirely.
+# (boolean value)
+#inject_key = false
+
+#
+# Determines how the file system is chosen for injecting data into
+# it.
+#
+# *libguestfs* will be used as the first solution to inject data. If
+# it is not available on the host, the image will be locally mounted
+# on the host as a fallback solution. If libguestfs is not able to
+# determine the root partition (because there is more or less than
+# one root partition) or cannot mount the file system, this results
+# in an error and the instance won't boot.
+#
+# Possible values:
+#
+# * -2 => disable the injection of data.
+# * -1 => find the root partition with the file system to mount with
+# libguestfs
+# * 0 => The image is not partitioned
+# * >0 => The number of the partition to use for the injection
+#
+# Related options:
+#
+# * ``inject_key``: Injecting an SSH key only works if
+# ``inject_partition`` is set to a value greater than or equal to -1.
+# * ``inject_password``: Injecting an admin password only works if
+# ``inject_partition`` is set to a value greater than or equal to -1.
+# * ``guestfs``: You can enable the debug log level of libguestfs
+# with this config option. A more verbose output will help in
+# debugging issues.
+# * ``virt_type``: If you use ``lxc`` as virt_type it will be treated
+# as a
+# single partition image
+# (integer value)
+# Minimum value: -2
+#inject_partition = -2
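+# For example, to inject into the first partition of a partitioned
+# guest image (illustrative; only meaningful together with
+# inject_key/inject_password):
+# inject_partition = 1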
+
+# DEPRECATED:
+# Enable a mouse cursor within a graphical VNC or SPICE session.
+#
+# This will only be taken into account if the VM is fully virtualized
+# and VNC
+# and/or SPICE is enabled. If the node doesn't support a graphical
+# framebuffer,
+# then it is valid to set this to False.
+#
+# Related options:
+# * ``[vnc]enabled``: If VNC is enabled, ``use_usb_tablet`` will have
+# an effect.
+# * ``[spice]enabled`` + ``[spice].agent_enabled``: If SPICE is
+# enabled and the
+# spice agent is disabled, the config value of ``use_usb_tablet``
+# will have
+# an effect.
+# (boolean value)
+# This option is deprecated for removal since 14.0.0.
+# Its value may be silently ignored in the future.
+# Reason: This option is being replaced by the 'pointer_model' option.
+#use_usb_tablet = true
+
+#
+# The IP address or hostname to be used as the target for live
+# migration traffic.
+#
+# If this option is set to None, the hostname of the migration target
+# compute
+# node will be used.
+#
+# This option is useful in environments where the live-migration
+# traffic can impact the network plane significantly. A separate
+# network for live-migration traffic can then use this config option,
+# avoiding the impact on the management network.
+#
+# Possible values:
+#
+# * A valid IP address or hostname, else None.
+#
+# Related options:
+#
+# * ``live_migration_tunnelled``: The live_migration_inbound_addr
+# value is
+# ignored if tunneling is enabled.
+# (string value)
+#live_migration_inbound_addr = <None>
+{%- if compute.libvirt.migration_inbound_addr is defined %}
+live_migration_inbound_addr = {{ compute.libvirt.migration_inbound_addr }}
+{%- endif %}
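+# For example, to route migration traffic over a dedicated interface
+# (illustrative address, not from this deployment):
+# live_migration_inbound_addr = 192.168.10.5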
+
+# DEPRECATED:
+# Live migration target URI to use.
+#
+# Override the default libvirt live migration target URI (which is
+# dependent
+# on virt_type). Any included "%s" is replaced with the migration
+# target
+# hostname.
+#
+# If this option is set to None (which is the default), Nova will
+# automatically generate the `live_migration_uri` value based on only
+# the 4 supported `virt_type` values in the following list:
+#
+# * 'kvm': 'qemu+tcp://%s/system'
+# * 'qemu': 'qemu+tcp://%s/system'
+# * 'xen': 'xenmigr://%s/system'
+# * 'parallels': 'parallels+tcp://%s/system'
+#
+# Related options:
+#
+# * ``live_migration_inbound_addr``: If
+# ``live_migration_inbound_addr`` value
+# is not None and ``live_migration_tunnelled`` is False, the
+# ip/hostname
+# address of target compute node is used instead of
+# ``live_migration_uri`` as
+# the uri for live migration.
+# * ``live_migration_scheme``: If ``live_migration_uri`` is not set,
+# the scheme
+# used for live migration is taken from ``live_migration_scheme``
+# instead.
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# live_migration_uri is deprecated for removal in favor of two other
+# options that allow changing the live migration scheme and target
+# URI: ``live_migration_scheme`` and ``live_migration_inbound_addr``
+# respectively.
+#live_migration_uri = <None>
+
+#
+# URI scheme used for live migration.
+#
+# Override the default libvirt live migration scheme (which is
+# dependent on
+# virt_type). If this option is set to None, nova will automatically
+# choose a
+# sensible default based on the hypervisor. It is not recommended
+# that you change this unless you are very sure that the hypervisor
+# supports a particular scheme.
+#
+# Related options:
+#
+# * ``virt_type``: This option is meaningful only when ``virt_type``
+# is set to
+# `kvm` or `qemu`.
+# * ``live_migration_uri``: If ``live_migration_uri`` value is not
+# None, the
+# scheme used for live migration is taken from
+# ``live_migration_uri`` instead.
+# (string value)
+#live_migration_scheme = <None>
+
+#
+# Enable tunnelled migration.
+#
+# This option enables the tunnelled migration feature, where migration
+# data is
+# transported over the libvirtd connection. If enabled, we use the
+# VIR_MIGRATE_TUNNELLED migration flag, avoiding the need to configure
+# the network to allow direct hypervisor to hypervisor communication.
+# If False, use the native transport. If not set, Nova will choose a
+# sensible default based on, for example the availability of native
+# encryption support in the hypervisor. Enabling this option will
+# definitely
+# impact performance massively.
+#
+# Note that this option is NOT compatible with use of block migration.
+#
+# Related options:
+#
+# * ``live_migration_inbound_addr``: The live_migration_inbound_addr
+# value is
+# ignored if tunneling is enabled.
+# (boolean value)
+#live_migration_tunnelled = false
+{%- if compute.libvirt.live_migration_tunnelled is defined %}
+live_migration_tunnelled = {{ compute.libvirt.live_migration_tunnelled }}
+{%- endif %}
+
+#
+# Maximum bandwidth(in MiB/s) to be used during migration.
+#
+# If set to 0, the hypervisor will choose a suitable default. Some
+# hypervisors
+# do not support this feature and will return an error if bandwidth is
+# not 0.
+# Please refer to the libvirt documentation for further details.
+# (integer value)
+#live_migration_bandwidth = 0
+
+#
+# Maximum permitted downtime, in milliseconds, for live migration
+# switchover.
+#
+# Will be rounded up to a minimum of 100ms. You can increase this
+# value
+# if you want to allow live-migrations to complete faster, or avoid
+# live-migration timeout errors by allowing the guest to be paused for
+# longer during the live-migration switch over.
+#
+# Related options:
+#
+# * live_migration_completion_timeout
+# (integer value)
+# Minimum value: 100
+#live_migration_downtime = 500
+
+#
+# Number of incremental steps to reach max downtime value.
+#
+# Will be rounded up to a minimum of 3 steps.
+# (integer value)
+# Minimum value: 3
+#live_migration_downtime_steps = 10
+
+#
+# Time to wait, in seconds, between each step increase of the
+# migration
+# downtime.
+#
+# Minimum delay is 3 seconds. Value is per GiB of guest RAM + disk to
+# be transferred, with a lower bound of 2 GiB per device.
+# (integer value)
+# Minimum value: 3
+#live_migration_downtime_delay = 75
+
+#
+# Time to wait, in seconds, for migration to successfully complete
+# transferring
+# data before aborting the operation.
+#
+# Value is per GiB of guest RAM + disk to be transferred, with a
+# lower bound of 2 GiB. Should usually be larger than downtime delay *
+# downtime
+# steps. Set to 0 to disable timeouts.
+#
+# Related options:
+#
+# * live_migration_downtime
+# * live_migration_downtime_steps
+# * live_migration_downtime_delay
+# (integer value)
+# Note: This option can be changed without restarting.
+#live_migration_completion_timeout = 800
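+# Worked example with the default of 800: a guest with 6 GiB of RAM +
+# disk to transfer would be allowed 6 * 800 = 4800 seconds to
+# complete before the migration is aborted (illustrative values).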
+
+# DEPRECATED:
+# Time to wait, in seconds, for migration to make forward progress in
+# transferring data before aborting the operation.
+#
+# Set to 0 to disable timeouts.
+#
+# This is deprecated, and now disabled by default because we have
+# found serious
+# bugs in this feature that caused false live-migration timeout
+# failures. This
+# feature will be removed or replaced in a future release.
+# (integer value)
+# Note: This option can be changed without restarting.
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Serious bugs found in this feature.
+#live_migration_progress_timeout = 0
+
+#
+# This option allows nova to switch an on-going live migration to
+# post-copy mode, i.e., switch the active VM to the one on the
+# destination node before the migration is complete, therefore ensuring
+# an upper bound on the memory that needs to be transferred. Post-copy
+# requires libvirt>=1.3.3 and QEMU>=2.5.0.
+#
+# When permitted, post-copy mode will be automatically activated if a
+# live-migration memory copy iteration does not make percentage
+# increase of at least 10% over the last iteration.
+#
+# The live-migration force complete API also uses post-copy when
+# permitted. If post-copy mode is not available, force complete falls
+# back to pausing the VM to ensure the live-migration operation will
+# complete.
+#
+# When using post-copy mode, if the source and destination hosts lose
+# network connectivity, the VM being live-migrated will need to be
+# rebooted. For more details, please see the Administration guide.
+#
+# Related options:
+#
+# * live_migration_permit_auto_converge
+# (boolean value)
+#live_migration_permit_post_copy = false
+
+#
+# This option allows nova to start live migration with auto converge
+# on.
+#
+# Auto converge throttles down the CPU if progress of the on-going live
+# migration is slow. Auto converge will only be used if this flag is
+# set to True and post copy is not permitted, or post copy is
+# unavailable due to the version of libvirt and QEMU in use.
+#
+# Related options:
+#
+# * live_migration_permit_post_copy
+# (boolean value)
+#live_migration_permit_auto_converge = false
+{%- if compute.libvirt.live_migration_permit_auto_converge is defined %}
+live_migration_permit_auto_converge={{ compute.libvirt.live_migration_permit_auto_converge|lower }}
+{%- endif %}
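+# Pillar sketch for the block above (assumed layout; the value is
+# passed through the |lower filter, so booleans render as true/false):
+#
+#   nova:
+#     compute:
+#       libvirt:
+#         live_migration_permit_auto_converge: True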
+
+#
+# Determine the snapshot image format when sending to the image
+# service.
+#
+# If set, this decides what format is used when sending the snapshot
+# to the image service. If not set, defaults to the same type as the
+# source image.
+#
+# Possible values:
+#
+# * ``raw``: RAW disk format
+# * ``qcow2``: KVM default disk format
+# * ``vmdk``: VMware default disk format
+# * ``vdi``: VirtualBox default disk format
+# (string value)
+# Possible values:
+# raw - <No description provided>
+# qcow2 - <No description provided>
+# vmdk - <No description provided>
+# vdi - <No description provided>
+#snapshot_image_format = <None>
+
+#
+# Override the default disk prefix for the devices attached to an
+# instance.
+#
+# If set, this is used to identify a free disk device name for a bus.
+#
+# Possible values:
+#
+# * Any prefix which will result in a valid disk device name like 'sda'
+#   or 'hda', for example. This is only necessary if the device names
+#   differ from the commonly known device name prefixes for a
+#   virtualization type, such as: sd, xvd, uvd, vd.
+#
+# Related options:
+#
+# * ``virt_type``: Influences which device type is used, which
+#   determines the default disk prefix.
+# (string value)
+#disk_prefix = <None>
+
+# Number of seconds to wait for instance to shut down after soft
+# reboot request is made. We fall back to hard reboot if instance does
+# not shutdown within this window. (integer value)
+#wait_soft_reboot_seconds = 120
+
+#
+# Is used to set the CPU mode an instance should have.
+#
+# If virt_type="kvm|qemu", it will default to "host-model"; otherwise
+# it will default to "none".
+#
+# Possible values:
+#
+# * ``host-model``: Clones the host CPU feature flags
+# * ``host-passthrough``: Use the host CPU model exactly
+# * ``custom``: Use a named CPU model
+# * ``none``: Don't set a specific CPU model. For instances with
+#   ``virt_type`` as KVM/QEMU, the default CPU model from QEMU will be
+#   used, which provides a basic set of CPU features that are
+#   compatible with most hosts.
+#
+# Related options:
+#
+# * ``cpu_model``: This should be set ONLY when ``cpu_mode`` is set to
+# ``custom``. Otherwise, it would result in an error and the instance
+# launch will fail.
+#
+# (string value)
+# Possible values:
+# host-model - <No description provided>
+# host-passthrough - <No description provided>
+# custom - <No description provided>
+# none - <No description provided>
+{%- if compute.cpu_mode is defined %}
+cpu_mode = {{ compute.cpu_mode }}
+{%- else %}
+#cpu_mode = <None>
+{%- endif %}
+
+#
+# Set the name of the libvirt CPU model the instance should use.
+#
+# Possible values:
+#
+# * The named CPU models listed in ``/usr/share/libvirt/cpu_map.xml``
+#
+# Related options:
+#
+# * ``cpu_mode``: This should be set to ``custom`` ONLY when you want
+# to
+# configure (via ``cpu_model``) a specific named CPU model.
+# Otherwise, it
+# would result in an error and the instance launch will fail.
+#
+# * ``virt_type``: Only the virtualization types ``kvm`` and ``qemu``
+# use this.
+# (string value)
+#cpu_model = <None>
+{%- if compute.get('libvirt', {}).cpu_model is defined and compute.cpu_mode == 'custom' %}
+cpu_model = {{ compute.libvirt.cpu_model }}
+{%- endif %}
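+# Example pillar for the cpu_mode/cpu_model pair above (assumed layout;
+# the model name is illustrative and must exist in libvirt's cpu map):
+#
+#   nova:
+#     compute:
+#       cpu_mode: custom
+#       libvirt:
+#         cpu_model: IvyBridge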
+
+#
+# This allows specifying granular CPU feature flags when specifying CPU
+# models. For example, to explicitly specify the ``pcid``
+# (Process-Context ID, an Intel processor feature) flag to the
+# "IvyBridge" virtual CPU model::
+#
+# [libvirt]
+# cpu_mode = custom
+# cpu_model = IvyBridge
+# cpu_model_extra_flags = pcid
+#
+# Currently, the choice is restricted to only one option: ``pcid`` (the
+# option is case-insensitive, so ``PCID`` is also valid). This flag is
+# now required to address the guest performance degradation as a result
+# of applying the "Meltdown" CVE fixes on certain Intel CPU models.
+#
+# Note that when using this config attribute to set the 'PCID' CPU
+# flag, not all virtual (i.e. libvirt / QEMU) CPU models need it:
+#
+# * The only virtual CPU models that include the 'PCID' capability are
+#   Intel "Haswell", "Broadwell", and "Skylake" variants.
+#
+# * The libvirt / QEMU CPU models "Nehalem", "Westmere", "SandyBridge",
+#   and "IvyBridge" will _not_ expose the 'PCID' capability by default,
+#   even if the host CPUs by the same name include it. I.e. 'PCID'
+#   needs to be explicitly specified when using the said virtual CPU
+#   models.
+#
+# For now, the ``cpu_model_extra_flags`` config attribute is valid only
+# in combination with the ``cpu_mode`` + ``cpu_model`` options.
+#
+# Besides ``custom``, the libvirt driver has two other CPU modes: The
+# default, ``host-model``, tells it to do the right thing with respect
+# to handling the 'PCID' CPU flag for the guest -- *assuming* you are
+# running updated processor microcode, host and guest kernel, libvirt,
+# and QEMU. The other mode, ``host-passthrough``, checks if 'PCID' is
+# available in the hardware, and if so directly passes it through to
+# the Nova guests. Thus, in the context of 'PCID', with either of these
+# CPU modes (``host-model`` or ``host-passthrough``), there is no need
+# to use the ``cpu_model_extra_flags``.
+#
+# Related options:
+#
+# * cpu_mode
+# * cpu_model
+# (list value)
+#cpu_model_extra_flags =
+
+# Location where libvirt driver will store snapshots before uploading
+# them to image service (string value)
+#snapshots_directory = $instances_path/snapshots
+
+# Location where the Xen hvmloader is kept (string value)
+#xen_hvmloader_path = /usr/lib/xen/boot/hvmloader
+
+#
+# Specific cache modes to use for different disk types.
+#
+# For example: file=directsync,block=none,network=writeback
+#
+# For local or direct-attached storage, it is recommended that you use
+# writethrough (default) mode, as it ensures data integrity and has
+# acceptable I/O performance for applications running in the guest,
+# especially for read operations. However, caching mode none is
+# recommended for remote NFS storage, because direct I/O operations
+# (O_DIRECT) perform better than synchronous I/O operations (with
+# O_SYNC). Caching mode none effectively turns all guest I/O operations
+# into direct I/O operations on the host, which is the NFS client in
+# this environment.
+#
+# Possible cache modes:
+#
+# * default: Same as writethrough.
+# * none: With caching mode set to none, the host page cache is
+#   disabled, but the disk write cache is enabled for the guest. In
+#   this mode, the write performance in the guest is optimal because
+#   write operations bypass the host page cache and go directly to the
+#   disk write cache. If the disk write cache is battery-backed, or if
+#   the applications or storage stack in the guest transfer data
+#   properly (either through fsync operations or file system barriers),
+#   then data integrity can be ensured. However, because the host page
+#   cache is disabled, the read performance in the guest would not be
+#   as good as in the modes where the host page cache is enabled, such
+#   as writethrough mode. Shareable disk devices, like for a
+#   multi-attachable block storage volume, will have their cache mode
+#   set to 'none' regardless of configuration.
+# * writethrough: writethrough mode is the default caching mode. With
+#   caching set to writethrough mode, the host page cache is enabled,
+#   but the disk write cache is disabled for the guest. Consequently,
+#   this caching mode ensures data integrity even if the applications
+#   and storage stack in the guest do not transfer data to permanent
+#   storage properly (either through fsync operations or file system
+#   barriers). Because the host page cache is enabled in this mode, the
+#   read performance for applications running in the guest is generally
+#   better. However, the write performance might be reduced because the
+#   disk write cache is disabled.
+# * writeback: With caching set to writeback mode, both the host page
+#   cache and the disk write cache are enabled for the guest. Because
+#   of this, the I/O performance for applications running in the guest
+#   is good, but the data is not protected in a power failure. As a
+#   result, this caching mode is recommended only for temporary data
+#   where potential data loss is not a concern.
+# * directsync: Like "writethrough", but it bypasses the host page
+#   cache.
+# * unsafe: Caching mode of unsafe ignores cache transfer operations
+#   completely. As its name implies, this caching mode should be used
+#   only for temporary data where data loss is not a concern. This mode
+#   can be useful for speeding up guest installations, but you should
+#   switch to another caching mode in production environments.
+# (list value)
+#disk_cachemodes =
+
+# A path to a device that will be used as source of entropy on the
+# host. Permitted options are: /dev/random or /dev/hwrng (string
+# value)
+#rng_dev_path = <None>
+
+# For qemu or KVM guests, set this option to specify a default machine
+# type per host architecture. You can find a list of supported machine
+# types in your environment by checking the output of the "virsh
+# capabilities" command. The format of the value for this config option
+# is host-arch=machine-type. For example:
+# x86_64=machinetype1,armv7l=machinetype2 (list value)
+#hw_machine_type = <None>
+
+# The data source used to populate the host "serial" UUID exposed to
+# the guest in the virtual BIOS. (string value)
+# Possible values:
+# none - <No description provided>
+# os - <No description provided>
+# hardware - <No description provided>
+# auto - <No description provided>
+#sysinfo_serial = auto
+
+# The number of seconds in the memory usage statistics period. A zero
+# or negative value disables memory usage statistics. (integer value)
+#mem_stats_period_seconds = 10
+
+# List of uid targets and ranges. Syntax is guest-uid:host-uid:count.
+# Maximum of 5 allowed. (list value)
+#uid_maps =
+
+# List of gid targets and ranges. Syntax is guest-gid:host-gid:count.
+# Maximum of 5 allowed. (list value)
+#gid_maps =
+
+# In a realtime host context, vCPUs for the guest will run at this
+# scheduling priority. The priority range depends on the host kernel
+# (usually 1-99). (integer value)
+#realtime_scheduler_priority = 1
+
+#
+# This is a list of performance events which can be used for
+# monitoring. These events will be passed to the libvirt domain XML
+# when creating new instances. Event statistics data can then be
+# collected from libvirt. The minimum libvirt version is 2.0.0. For
+# more information about `Performance monitoring events`, refer to
+# https://libvirt.org/formatdomain.html#elementsPerf .
+#
+# Possible values:
+# * A string list. For example: ``enabled_perf_events = cmt, mbml,
+#   mbmt``
+#   The supported events list can be found in
+#   https://libvirt.org/html/libvirt-libvirt-domain.html , where you
+#   may need to search for the key words ``VIR_PERF_PARAM_*``
+# (list value)
+#enabled_perf_events =
+
+#
+# VM Images format.
+#
+# If default is specified, then the use_cow_images flag is used instead
+# of this one.
+#
+# Related options:
+#
+# * virt.use_cow_images
+# * images_volume_group
+# (string value)
+# Possible values:
+# raw - <No description provided>
+# flat - <No description provided>
+# qcow2 - <No description provided>
+# lvm - <No description provided>
+# rbd - <No description provided>
+# ploop - <No description provided>
+# default - <No description provided>
+#images_type = default
+
+#
+# LVM Volume Group that is used for VM images, when you specify
+# images_type=lvm
+#
+# Related options:
+#
+# * images_type
+# (string value)
+#images_volume_group = <None>
+
+#
+# Create sparse logical volumes (with virtualsize) if this flag is set
+# to True.
+# (boolean value)
+#sparse_logical_volumes = false
+
+# The RADOS pool in which rbd volumes are stored (string value)
+#images_rbd_pool = rbd
+
+# Path to the ceph configuration file to use (string value)
+#images_rbd_ceph_conf =
+
+#
+# Discard option for nova managed disks.
+#
+# Requires:
+#
+# * Libvirt >= 1.0.6
+# * Qemu >= 1.5 (raw format)
+# * Qemu >= 1.6 (qcow2 format)
+# (string value)
+# Possible values:
+# ignore - <No description provided>
+# unmap - <No description provided>
+#hw_disk_discard = <None>
+{%- if compute.libvirt.hw_disk_discard is defined %}
+hw_disk_discard={{ compute.libvirt.hw_disk_discard }}
+{%- endif %}
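+# Example pillar for the conditional above (assumed layout; 'unmap' is
+# one of the two values listed for this option):
+#
+#   nova:
+#     compute:
+#       libvirt:
+#         hw_disk_discard: unmap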
+
+# DEPRECATED: Allows image information files to be stored in non-
+# standard locations (string value)
+# This option is deprecated for removal since 14.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Image info files are no longer used by the image cache
+#image_info_filename_pattern = $instances_path/$image_cache_subdirectory_name/%(image)s.info
+
+# Unused resized base images younger than this will not be removed
+# (integer value)
+#remove_unused_resized_minimum_age_seconds = 3600
+
+# DEPRECATED: Write a checksum for files in _base to disk (boolean
+# value)
+# This option is deprecated for removal since 14.0.0.
+# Its value may be silently ignored in the future.
+# Reason: The image cache no longer periodically calculates checksums
+# of stored images. Data integrity can be checked at the block or
+# filesystem level.
+#checksum_base_images = false
+
+# DEPRECATED: How frequently to checksum base images (integer value)
+# This option is deprecated for removal since 14.0.0.
+# Its value may be silently ignored in the future.
+# Reason: The image cache no longer periodically calculates checksums
+# of stored images. Data integrity can be checked at the block or
+# filesystem level.
+#checksum_interval_seconds = 3600
+
+#
+# Method used to wipe ephemeral disks when they are deleted. Only takes
+# effect if LVM is set as backing storage.
+#
+# Possible values:
+#
+# * none - do not wipe deleted volumes
+# * zero - overwrite volumes with zeroes
+# * shred - overwrite volume repeatedly
+#
+# Related options:
+#
+# * images_type - must be set to ``lvm``
+# * volume_clear_size
+# (string value)
+# Possible values:
+# none - <No description provided>
+# zero - <No description provided>
+# shred - <No description provided>
+#volume_clear = zero
+
+#
+# Size of area in MiB, counting from the beginning of the allocated
+# volume, that will be cleared using the method set in the
+# ``volume_clear`` option.
+#
+# Possible values:
+#
+# * 0 - clear whole volume
+# * >0 - clear specified amount of MiB
+#
+# Related options:
+#
+# * images_type - must be set to ``lvm``
+# * volume_clear - must be set and the value must be different than
+# ``none``
+# for this option to have any impact
+# (integer value)
+# Minimum value: 0
+#volume_clear_size = 0
+
+#
+# Enable snapshot compression for ``qcow2`` images.
+#
+# Note: you can set ``snapshot_image_format`` to ``qcow2`` to force all
+# snapshots to be in ``qcow2`` format, independently from their
+# original image type.
+#
+# Related options:
+#
+# * snapshot_image_format
+# (boolean value)
+#snapshot_compression = false
+
+# Use virtio for bridge interfaces with KVM/QEMU (boolean value)
+#use_virtio_for_bridges = true
+
+#
+# Use multipath connection of the iSCSI or FC volume
+#
+# Volumes can be connected as multipath devices in libvirt. This will
+# provide high availability and fault tolerance.
+# (boolean value)
+# Deprecated group/name - [libvirt]/iscsi_use_multipath
+#volume_use_multipath = false
+
+#
+# Number of times to scan given storage protocol to find volume.
+# (integer value)
+# Deprecated group/name - [libvirt]/num_iscsi_scan_tries
+#num_volume_scan_tries = 5
+
+#
+# Number of times to rediscover AoE target to find volume.
+#
+# Nova provides support for block storage attaching to hosts via AoE
+# (ATA over Ethernet). This option allows the user to specify the
+# maximum number of retry attempts that can be made to discover the AoE
+# device.
+# (integer value)
+#num_aoe_discover_tries = 3
+
+#
+# The iSCSI transport iface to use to connect to the target in case
+# offload support is desired.
+#
+# The default format is of the form <transport_name>.<hwaddress>, where
+# <transport_name> is one of (be2iscsi, bnx2i, cxgb3i, cxgb4i, qla4xxx,
+# ocs) and <hwaddress> is the MAC address of the interface, which can
+# be generated via the iscsiadm -m iface command. Do not confuse the
+# iscsi_iface parameter to be provided here with the actual transport
+# name.
+# (string value)
+# Deprecated group/name - [libvirt]/iscsi_transport
+#iscsi_iface = <None>
+
+#
+# Number of times to scan iSER target to find volume.
+#
+# iSER is a server network protocol that extends the iSCSI protocol to
+# use Remote Direct Memory Access (RDMA). This option allows the user
+# to specify the maximum number of scan attempts that can be made to
+# find the iSER volume.
+# (integer value)
+#num_iser_scan_tries = 5
+
+#
+# Use multipath connection of the iSER volume.
+#
+# iSER volumes can be connected as multipath devices. This will provide
+# high availability and fault tolerance.
+# (boolean value)
+#iser_use_multipath = false
+
+#
+# The RADOS client name for accessing rbd (RADOS Block Device)
+# volumes.
+#
+# Libvirt will refer to this user when connecting and authenticating
+# with the Ceph RBD server.
+# (string value)
+#rbd_user = <None>
+
+#
+# The libvirt UUID of the secret for the rbd_user volumes.
+# (string value)
+#rbd_secret_uuid = <None>
+
+#
+# Directory where the NFS volume is mounted on the compute node.
+# The default is the 'mnt' directory of the location where nova's
+# Python module is installed.
+#
+# NFS provides shared storage for the OpenStack Block Storage service.
+#
+# Possible values:
+#
+# * A string representing absolute path of mount point.
+# (string value)
+#nfs_mount_point_base = $state_path/mnt
+
+#
+# Mount options passed to the NFS client. See the nfs man page for
+# details.
+#
+# Mount options control the way the filesystem is mounted and how the
+# NFS client behaves when accessing files on this mount point.
+#
+# Possible values:
+#
+# * Any string representing mount options separated by commas.
+# * Example string: vers=3,lookupcache=pos
+# (string value)
+#nfs_mount_options = <None>
+
+#
+# Directory where the Quobyte volume is mounted on the compute node.
+#
+# Nova supports the Quobyte volume driver, which enables storing Block
+# Storage service volumes on a Quobyte storage back end. This option
+# specifies the path of the directory where the Quobyte volume is
+# mounted.
+#
+# Possible values:
+#
+# * A string representing absolute path of mount point.
+# (string value)
+#quobyte_mount_point_base = $state_path/mnt
+
+# Path to a Quobyte Client configuration file. (string value)
+#quobyte_client_cfg = <None>
+
+#
+# Directory where the SMBFS shares are mounted on the compute node.
+# (string value)
+#smbfs_mount_point_base = $state_path/mnt
+
+#
+# Mount options passed to the SMBFS client.
+#
+# Provide SMBFS options as a single string containing all parameters.
+# See the mount.cifs man page for details. Note that the libvirt-qemu
+# ``uid`` and ``gid`` must be specified.
+# (string value)
+#smbfs_mount_options =
+
+#
+# libvirt's transport method for remote file operations.
+#
+# Because libvirt cannot use RPC to copy files over the network to/from
+# other compute nodes, another method must be used for:
+#
+# * creating a directory on the remote host
+# * creating a file on the remote host
+# * removing a file from the remote host
+# * copying a file to the remote host
+# (string value)
+# Possible values:
+# ssh - <No description provided>
+# rsync - <No description provided>
+#remote_filesystem_transport = ssh
+
+#
+# Directory where the Virtuozzo Storage clusters are mounted on the
+# compute node.
+#
+# This option defines a non-standard mountpoint for the Vzstorage
+# cluster.
+#
+# Related options:
+#
+# * vzstorage_mount_* group of parameters
+# (string value)
+#vzstorage_mount_point_base = $state_path/mnt
+
+#
+# Mount owner user name.
+#
+# This option defines the owner user of Vzstorage cluster mountpoint.
+#
+# Related options:
+#
+# * vzstorage_mount_* group of parameters
+# (string value)
+#vzstorage_mount_user = stack
+
+#
+# Mount owner group name.
+#
+# This option defines the owner group of Vzstorage cluster mountpoint.
+#
+# Related options:
+#
+# * vzstorage_mount_* group of parameters
+# (string value)
+#vzstorage_mount_group = qemu
+
+#
+# Mount access mode.
+#
+# This option defines the access bits of the Vzstorage cluster
+# mountpoint, in a format similar to that of the chmod(1) utility,
+# for example 0770. It consists of one to four digits ranging from 0
+# to 7, with missing leading digits assumed to be 0's.
+#
+# Related options:
+#
+# * vzstorage_mount_* group of parameters
+# (string value)
+#vzstorage_mount_perms = 0770
+
+#
+# Path to the vzstorage client log.
+#
+# This option defines the log of cluster operations; it should include
+# the "%(cluster_name)s" template to separate logs from multiple
+# shares.
+#
+# Related options:
+#
+# * vzstorage_mount_opts may include more detailed logging options.
+# (string value)
+#vzstorage_log_path = /var/log/vstorage/%(cluster_name)s/nova.log.gz
+
+#
+# Path to the SSD cache file.
+#
+# You can attach an SSD drive to a client and configure the drive to
+# store a local cache of frequently accessed data. By having a local
+# cache on a client's SSD drive, you can increase the overall cluster
+# performance by 10 or more times.
+# WARNING! There are many SSD models which are not server grade and
+# may lose an arbitrary set of data changes on power loss. Such SSDs
+# should not be used in Vstorage and are dangerous as they may lead to
+# data corruption and inconsistencies. Please consult the manual on
+# which SSD models are known to be safe or verify it using the
+# vstorage-hwflush-check(1) utility.
+#
+# This option defines the path, which should include the
+# "%(cluster_name)s" template to separate caches from multiple shares.
+#
+# Related options:
+#
+# * vzstorage_mount_opts may include more detailed cache options.
+# (string value)
+#vzstorage_cache_path = <None>
+
+#
+# Extra mount options for pstorage-mount
+#
+# For a full description of them, see
+# https://static.openvz.org/vz-man/man1/pstorage-mount.1.gz.html
+# Format is a python string representation of an arguments list, like:
+# "['-v', '-R', '500']"
+# It shouldn't include -c, -l, -C, -u, -g and -m as those have explicit
+# vzstorage_* options.
+#
+# Related options:
+#
+# * All other vzstorage_* options
+# (list value)
+#vzstorage_mount_opts =
+
+
+[metrics]
+#
+# Configuration options for metrics
+#
+# Options under this group allow you to adjust how values assigned to
+# metrics are calculated.
+
+#
+# From nova.conf
+#
+
+#
+# When using metrics to weight the suitability of a host, you can use
+# this option to change how the calculated weight influences the weight
+# assigned to a host as follows:
+#
+# * >1.0: increases the effect of the metric on overall weight
+# * 1.0: no change to the calculated weight
+# * >0.0,<1.0: reduces the effect of the metric on overall weight
+# * 0.0: the metric value is ignored, and the value of the
+# 'weight_of_unavailable' option is returned instead
+# * >-1.0,<0.0: the effect is reduced and reversed
+# * -1.0: the effect is reversed
+# * <-1.0: the effect is increased proportionally and reversed
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect.
+#
+# Possible values:
+#
+# * An integer or float value, where the value corresponds to the
+#   multiplier ratio for this weigher.
+#
+# Related options:
+#
+# * weight_of_unavailable
+# (floating point value)
+#weight_multiplier = 1.0
+
+#
+# This setting specifies the metrics to be weighed and the relative
+# ratios for each metric. This should be a single string value,
+# consisting of a series of one or more 'name=ratio' pairs, separated
+# by commas, where 'name' is the name of the metric to be weighed, and
+# 'ratio' is the relative weight for that metric.
+#
+# Note that if the ratio is set to 0, the metric value is ignored, and
+# instead the weight will be set to the value of the
+# 'weight_of_unavailable' option.
+#
+# As an example, let's consider the case where this option is set to:
+#
+# ``name1=1.0, name2=-1.3``
+#
+# The final weight will be:
+#
+# ``(name1.value * 1.0) + (name2.value * -1.3)``
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect.
+#
+# Possible values:
+#
+# * A list of zero or more key/value pairs separated by commas, where
+#   the key is a string representing the name of a metric and the value
+#   is a numeric weight for that metric. If any value is set to 0, the
+#   value is ignored and the weight will be set to the value of the
+#   'weight_of_unavailable' option.
+#
+# Related options:
+#
+# * weight_of_unavailable
+# (list value)
+#weight_setting =
+
+#
+# This setting determines how any unavailable metrics are treated. If
+# this option is set to True, any hosts for which a metric is
+# unavailable will raise an exception, so it is recommended to also use
+# the MetricFilter to filter out those hosts before weighing.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect.
+#
+# Possible values:
+#
+# * True or False, where False ensures any metric being unavailable for
+#   a host will set the host weight to 'weight_of_unavailable'.
+#
+# Related options:
+#
+# * weight_of_unavailable
+# (boolean value)
+#required = true
+
+#
+# When any of the following conditions are met, this value will be used
+# in place of any actual metric value:
+#
+# * One of the metrics named in 'weight_setting' is not available for
+#   a host, and the value of 'required' is False
+# * The ratio specified for a metric in 'weight_setting' is 0
+# * The 'weight_multiplier' option is set to 0
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect.
+#
+# Possible values:
+#
+# * An integer or float value, where the value corresponds to the
+#   multiplier ratio for this weigher.
+#
+# Related options:
+#
+# * weight_setting
+# * required
+# * weight_multiplier
+# (floating point value)
+#weight_of_unavailable = -10000.0
+
+
+[mks]
+#
+# The Nova compute node uses WebMKS, a desktop sharing protocol, to
+# provide instance console access to VMs created by VMware hypervisors.
+#
+# Related options:
+# The following options must be set to provide console access.
+# * mksproxy_base_url
+# * enabled
+
+#
+# From nova.conf
+#
+
+#
+# Location of the MKS web console proxy
+#
+# The URL in the response points to a WebMKS proxy which starts
+# proxying between the client and the corresponding vCenter server
+# where the instance runs. In order to use web based console access,
+# the WebMKS proxy should be installed and configured.
+#
+# Possible values:
+#
+# * Must be a valid URL of the form: ``http://host:port/`` or
+#   ``https://host:port/``
+# (uri value)
+#mksproxy_base_url = http://127.0.0.1:6090/
+
+#
+# Enables graphical console access for virtual machines.
+# (boolean value)
+#enabled = false
+
+
+[neutron]
+#
+# Configuration options for neutron (network connectivity as a
+# service).
+
+#
+# From nova.conf
+#
+
+# DEPRECATED:
+# This option specifies the URL for connecting to Neutron.
+#
+# Possible values:
+#
+# * Any valid URL that points to the Neutron API service is appropriate
+#   here. This typically matches the URL returned for the 'network'
+#   service type from the Keystone service catalog.
+# (uri value)
+# This option is deprecated for removal since 17.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Endpoint lookup uses the service catalog via common
+# keystoneauth1 Adapter configuration options. In the current release,
+# "url" will override this behavior, but will be ignored and/or
+# removed in a future release. To achieve the same result, use the
+# endpoint_override option instead.
+#url = http://127.0.0.1:9696
+
+#
+# Default name for the Open vSwitch integration bridge.
+#
+# Specifies the name of an integration bridge interface used by Open
+# vSwitch. This option is only used if Neutron does not specify the OVS
+# bridge name in port binding responses.
+# (string value)
+#ovs_bridge = br-int
+
+#
+# Default name for the floating IP pool.
+#
+# Specifies the name of the floating IP pool used for allocating
+# floating IPs. This option is only used if Neutron does not specify
+# the floating IP pool name in port binding responses.
+# (string value)
+#default_floating_pool = nova
+
+#
+# Integer value representing the number of seconds to wait before
+# querying Neutron for extensions. After this number of seconds, the
+# next time Nova needs to create a resource in Neutron it will requery
+# Neutron for the extensions that it has loaded. Setting the value to 0
+# will refresh the extensions with no wait.
+# (integer value)
+# Minimum value: 0
+#extension_sync_interval = 600
+extension_sync_interval={{ compute.network.get('extension_sync_interval', '600') }}
+
+#
+# When set to True, this option indicates that Neutron will be used to
+# proxy metadata requests and resolve instance ids. Otherwise, the
+# instance ID must be passed to the metadata request in the
+# 'X-Instance-ID' header.
+#
+# Related options:
+#
+# * metadata_proxy_shared_secret
+# (boolean value)
+#service_metadata_proxy = false
+
+#
+# This option holds the shared secret string used to validate proxied
+# Neutron metadata requests. In order to be used, the
+# 'X-Metadata-Provider-Signature' header must be supplied in the
+# request.
+#
+# Related options:
+#
+# * service_metadata_proxy
+# (string value)
+#metadata_proxy_shared_secret =
+
+# PEM encoded Certificate Authority to use when verifying HTTPS
+# connections. (string value)
+#cafile = <None>
+{%- if compute.network.get('protocol', 'http') == 'https' %}
+cafile={{ compute.network.get('cacert_file', compute.cacert_file) }}
+{%- endif %}
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Timeout value for http requests (integer value)
+#timeout = <None>
+timeout=300
+
+# Authentication type to load (string value)
+# Deprecated group/name - [neutron]/auth_plugin
+#auth_type = <None>
+auth_type = v3password
+
+# Config Section from which to load plugin specific options (string
+# value)
+#auth_section = <None>
+
+# Authentication URL (string value)
+#auth_url = <None>
+auth_url = {{ compute.identity.get('protocol', 'http') }}://{{ compute.identity.host }}:{{ compute.identity.port }}/v3
+
+# Scope for system operations (string value)
+#system_scope = <None>
+
+# Domain ID to scope to (string value)
+#domain_id = <None>
+
+# Domain name to scope to (string value)
+#domain_name = <None>
+
+# Project ID to scope to (string value)
+#project_id = <None>
+
+# Project name to scope to (string value)
+#project_name = <None>
+project_name={{ compute.identity.tenant }}
+
+# Domain ID containing project (string value)
+#project_domain_id = <None>
+
+# Domain name containing project (string value)
+#project_domain_name = <None>
+project_domain_name = {{ compute.get('project_domain_name', 'Default') }}
+
+# Trust ID (string value)
+#trust_id = <None>
+
+# Optional domain ID to use with v3 and v2 parameters. It will be used
+# for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_id = <None>
+
+# Optional domain name to use with v3 API and v2 parameters. It will
+# be used for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_name = <None>
+
+# User ID (string value)
+#user_id = <None>
+
+# Username (string value)
+# Deprecated group/name - [neutron]/user_name
+#username = <None>
+username={{ compute.network.user }}
+
+# User's domain id (string value)
+#user_domain_id = <None>
+
+# User's domain name (string value)
+#user_domain_name = <None>
+user_domain_name = {{ compute.get('user_domain_name', 'Default') }}
+
+# User's password (string value)
+#password = <None>
+password={{ compute.network.password }}
+
+# Tenant ID (string value)
+#tenant_id = <None>
+
+# Tenant Name (string value)
+#tenant_name = <None>
+
+# The default service_type for endpoint URL discovery. (string value)
+#service_type = network
+
+# The default service_name for endpoint URL discovery. (string value)
+#service_name = <None>
+
+# List of interfaces, in order of preference, for endpoint URL. (list
+# value)
+#valid_interfaces = internal,public
+
+# The default region_name for endpoint URL discovery. (string value)
+#region_name = <None>
+region_name= {{ compute.network.region }}
+
+# Always use this endpoint URL for requests for this client. NOTE: The
+# unversioned endpoint should be specified here; to request a
+# particular API version, use the `version`, `min-version`, and/or
+# `max-version` options. (string value)
+#endpoint_override = <None>
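+
+# The [neutron] credentials above are rendered from pillar. A sketch of
+# the keys this template reads (values are illustrative only):
+#
+#   nova:
+#     compute:
+#       network:
+#         user: neutron
+#         password: secret
+#         region: RegionOne
+#       identity:
+#         protocol: http
+#         host: 10.0.0.10
+#         port: 5000
+#         tenant: service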
+
+
+[notifications]
+#
+# Most of the actions in Nova which manipulate the system state
+# generate notifications which are posted to the messaging component
+# (e.g. RabbitMQ) and can be consumed by any service outside of
+# OpenStack. More technical details at
+# https://docs.openstack.org/nova/latest/reference/notifications.html
+
+#
+# From nova.conf
+#
+
+#
+# If set, send compute.instance.update notifications on instance state
+# changes.
+#
+# Please refer to
+# https://docs.openstack.org/nova/latest/reference/notifications.html
+# for additional information on notifications.
+#
+# Possible values:
+#
+# * None - no notifications
+# * "vm_state" - notifications are sent with VM state transition
+# information in
+# the ``old_state`` and ``state`` fields. The ``old_task_state`` and
+# ``new_task_state`` fields will be set to the current task_state of
+# the
+# instance.
+# * "vm_and_task_state" - notifications are sent with VM and task
+# state
+# transition information.
+# (string value)
+# Possible values:
+# <None> - <No description provided>
+# vm_state - <No description provided>
+# vm_and_task_state - <No description provided>
+#notify_on_state_change = <None>
+{%- if compute.get('notification', {}).notify_on is defined %}
+{%- for key, value in compute.notification.notify_on.iteritems() %}
+notify_on_{{ key }} = {{ value }}
+{%- endfor %}
+{%- elif pillar.ceilometer is defined %}
+notify_on_state_change = vm_and_task_state
+{%- endif %}
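+# Pillar sketch for the notification block above (assumed layout; each
+# key under notify_on becomes a notify_on_* option):
+#
+#   nova:
+#     compute:
+#       notification:
+#         notify_on:
+#           state_change: vm_and_task_state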
+
+# Default notification level for outgoing notifications. (string
+# value)
+# Possible values:
+# DEBUG - <No description provided>
+# INFO - <No description provided>
+# WARN - <No description provided>
+# ERROR - <No description provided>
+# CRITICAL - <No description provided>
+# Deprecated group/name - [DEFAULT]/default_notification_level
+#default_level = INFO
+
+# DEPRECATED:
+# Default publisher_id for outgoing notifications. If you consider
+# routing notifications using a different publisher, change this value
+# accordingly.
+#
+# Possible values:
+#
+# * Defaults to the current hostname of this host, but it can be any
+#   valid oslo.messaging publisher_id
+#
+# Related options:
+#
+# * host - Hostname, FQDN or IP address of this host.
+# (string value)
+# This option is deprecated for removal since 17.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# This option is only used when ``monkey_patch=True`` and
+# ``monkey_patch_modules`` is configured to specify the legacy
+# notify_decorator. Since the monkey_patch and monkey_patch_modules
+# options are deprecated, this option is also deprecated.
+#default_publisher_id = $host
+
+#
+# Specifies which notification format shall be used by nova.
+#
+# The default value is fine for most deployments and rarely needs to be
+# changed. This value can be set to 'versioned' once the infrastructure
+# moves closer to consuming the newer format of notifications. After
+# this occurs, this option will be removed.
+#
+# Note that notifications can be completely disabled by setting
+# ``driver=noop``
+# in the ``[oslo_messaging_notifications]`` group.
+#
+# Possible values:
+# * unversioned: Only the legacy unversioned notifications are emitted.
+# * versioned: Only the new versioned notifications are emitted.
+# * both: Both the legacy unversioned and the new versioned
+#   notifications are emitted. (Default)
+#
+# The list of versioned notifications is visible in
+# https://docs.openstack.org/nova/latest/reference/notifications.html
+# (string value)
+# Possible values:
+# unversioned - <No description provided>
+# versioned - <No description provided>
+# both - <No description provided>
+#notification_format = both
+
+#
+# Specifies the topics for the versioned notifications issued by nova.
+#
+# The default value is fine for most deployments and rarely needs to be
+# changed. However, if you have a third-party service that consumes
+# versioned notifications, it might be worth getting a topic for that
+# service. Nova will send a message containing a versioned notification
+# payload to each topic queue in this list.
+#
+# The list of versioned notifications is visible in
+# https://docs.openstack.org/nova/latest/reference/notifications.html
+# (list value)
+#versioned_notifications_topics = versioned_notifications
+
+#
+# If enabled, include block device information in the versioned
+# notification payload. Sending block device information is disabled by
+# default as providing that information can incur some overhead on the
+# system since the information may need to be loaded from the database.
+# (boolean value)
+#bdms_in_notifications = false
+
+
+[osapi_v21]
+
+#
+# From nova.conf
+#
+
+# DEPRECATED:
+# This option is a string representing a regular expression (regex)
+# that matches the project_id as contained in URLs. If not set, it will
+# match normal UUIDs created by keystone.
+#
+# Possible values:
+#
+# * A string representing any legal regular expression
+# (string value)
+# This option is deprecated for removal since 13.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# Recent versions of nova constrain project IDs to hexadecimal
+# characters and dashes. If your installation uses IDs outside of this
+# range, you should use this option to provide your own regex, giving
+# you time to migrate offending projects to valid IDs before the next
+# release.
+#project_id_regex = <None>
+
+
+[pci]
+
+#
+# From nova.conf
+#
+
+#
+# An alias for a PCI passthrough device requirement.
+#
+# This allows users to specify the alias in the extra specs for a
+# flavor, without needing to repeat all the PCI property requirements.
+#
+# Possible Values:
+#
+# * A list of JSON values which describe the aliases. For example::
+#
+# alias = {
+# "name": "QuickAssist",
+# "product_id": "0443",
+# "vendor_id": "8086",
+# "device_type": "type-PCI",
+# "numa_policy": "required"
+# }
+#
+# This defines an alias for the Intel QuickAssist card (multi valued).
+# Valid key values are:
+#
+# ``name``
+# Name of the PCI alias.
+#
+# ``product_id``
+# Product ID of the device in hexadecimal.
+#
+# ``vendor_id``
+# Vendor ID of the device in hexadecimal.
+#
+# ``device_type``
+# Type of PCI device. Valid values are: ``type-PCI``, ``type-PF``
+# and
+# ``type-VF``.
+#
+# ``numa_policy``
+# Required NUMA affinity of device. Valid values are: ``legacy``,
+# ``preferred`` and ``required``.
+# (multi valued)
+# Deprecated group/name - [DEFAULT]/pci_alias
+#alias =
+
+#
+# White list of PCI devices available to VMs.
+#
+# Possible values:
+#
+# * A JSON dictionary which describes a whitelisted PCI device. It
+#   should take the following format:
+#
+#   ["vendor_id": "<id>",] ["product_id": "<id>",]
+#   ["address": "[[[[<domain>]:]<bus>]:][<slot>][.[<function>]]" |
+#    "devname": "<name>",]
+#   {"<tag>": "<tag_value>",}
+#
+#   Where '[' indicates zero or one occurrences, '{' indicates zero or
+#   multiple occurrences, and '|' indicates mutually exclusive options.
+#   Note that any missing fields are automatically wildcarded.
+#
+# Valid key values are :
+#
+# * "vendor_id": Vendor ID of the device in hexadecimal.
+# * "product_id": Product ID of the device in hexadecimal.
+# * "address": PCI address of the device.
+# * "devname": Device name of the device (for e.g. interface name).
+# Not all
+# PCI devices have a name.
+# * "<tag>": Additional <tag> and <tag_value> used for matching PCI
+# devices.
+# Supported <tag>: "physical_network".
+#
+# The address key supports traditional glob style and regular
+# expression syntax. Valid examples are:
+#
+# passthrough_whitelist = {"devname":"eth0",
+# "physical_network":"physnet"}
+# passthrough_whitelist = {"address":"*:0a:00.*"}
+# passthrough_whitelist = {"address":":0a:00.",
+# "physical_network":"physnet1"}
+# passthrough_whitelist = {"vendor_id":"1137",
+# "product_id":"0071"}
+# passthrough_whitelist = {"vendor_id":"1137",
+# "product_id":"0071",
+# "address": "0000:0a:00.1",
+# "physical_network":"physnet1"}
+# passthrough_whitelist = {"address":{"domain": ".*",
+# "bus": "02", "slot": "01",
+# "function": "[2-7]"},
+# "physical_network":"physnet1"}
+# passthrough_whitelist = {"address":{"domain": ".*",
+# "bus": "02", "slot":
+# "0[1-2]",
+# "function": ".*"},
+# "physical_network":"physnet1"}
+#
+# The following are invalid, as they specify mutually exclusive
+# options:
+#
+# passthrough_whitelist = {"devname":"eth0",
+# "physical_network":"physnet",
+# "address":"*:0a:00.*"}
+#
+# * A JSON list of JSON dictionaries corresponding to the above format.
+#   For example:
+#
+# passthrough_whitelist = [{"product_id":"0001",
+# "vendor_id":"8086"},
+# {"product_id":"0002",
+# "vendor_id":"8086"}]
+# (multi valued)
+# Deprecated group/name - [DEFAULT]/pci_passthrough_whitelist
+#passthrough_whitelist =
+{%- if compute.get('sriov', false) %}
+{%- for nic_name, sriov in compute.sriov.iteritems() %}
+passthrough_whitelist = {"devname":"{{ sriov.devname }}","physical_network":"{{ sriov.physical_network }}"}
+{%- endfor %}
+{%- endif %}
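+# Pillar sketch for the SR-IOV loop above (assumed layout; device and
+# network names are illustrative). passthrough_whitelist is multi
+# valued, so one line is emitted per NIC:
+#
+#   nova:
+#     compute:
+#       sriov:
+#         nic_one:
+#           devname: eth1
+#           physical_network: physnet2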
+
+[placement]
+
+#
+# From nova.conf
+#
+
+# DEPRECATED:
+# Region name of this node. This is used when picking the URL in the
+# service catalog.
+#
+# Possible values:
+#
+# * Any string representing region name
+# (string value)
+# This option is deprecated for removal since 17.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Endpoint lookup uses the service catalog via common
+# keystoneauth1 Adapter configuration options. Use the region_name
+# option instead.
+os_region_name = {{ compute.identity.region }}
+
+# DEPRECATED:
+# Endpoint interface for this node. This is used when picking the URL
+# in the service catalog.
+# (string value)
+# This option is deprecated for removal since 17.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Endpoint lookup uses the service catalog via common
+# keystoneauth1 Adapter configuration options. Use the
+# valid_interfaces option instead.
+#os_interface = <None>
+
+#
+# If True, when limiting allocation candidate results, the results will
+# be a random sampling of the full result set. If False, allocation
+# candidates are returned in a deterministic but undefined order. That
+# is, all things being equal, two requests for allocation candidates
+# will return the same results in the same order; but no guarantees are
+# made as to how that order is determined.
+# (boolean value)
+#randomize_allocation_candidates = false
+
+# PEM encoded Certificate Authority to use when verifying HTTPS
+# connections. (string value)
+#cafile = <None>
+{%- if compute.identity.get('protocol', 'http') == 'https' %}
+cafile={{ compute.identity.get('cacert_file', compute.cacert_file) }}
+{%- endif %}
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Timeout value for http requests (integer value)
+#timeout = <None>
+
+# Authentication type to load (string value)
+# Deprecated group/name - [placement]/auth_plugin
+auth_type = password
+
+# Config Section from which to load plugin specific options (string
+# value)
+#auth_section = <None>
+
+# Authentication URL (string value)
+#auth_url = <None>
+auth_url={{ compute.identity.get('protocol', 'http') }}://{{ compute.identity.host }}:35357/v3
+
+# Scope for system operations (string value)
+#system_scope = <None>
+
+# Domain ID to scope to (string value)
+#domain_id = <None>
+
+# Domain name to scope to (string value)
+#domain_name = <None>
+
+# Project ID to scope to (string value)
+#project_id = <None>
+
+# Project name to scope to (string value)
+project_name = {{ compute.identity.tenant }}
+
+# Domain ID containing project (string value)
+project_domain_id = {{ compute.identity.get('domain', 'default') }}
+
+# Domain name containing project (string value)
+#project_domain_name = <None>
+
+# Trust ID (string value)
+#trust_id = <None>
+
+# Optional domain ID to use with v3 and v2 parameters. It will be used
+# for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_id = <None>
+
+# Optional domain name to use with v3 API and v2 parameters. It will
+# be used for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_name = <None>
+
+# User ID (string value)
+#user_id = <None>
+
+# Username (string value)
+# Deprecated group/name - [placement]/user_name
+username = {{ compute.identity.user }}
+
+# User's domain id (string value)
+user_domain_id = {{ compute.identity.get('domain', 'default') }}
+
+# User's domain name (string value)
+#user_domain_name = <None>
+
+# User's password (string value)
+password = {{ compute.identity.password }}
+
+# Tenant ID (string value)
+#tenant_id = <None>
+
+# Tenant Name (string value)
+#tenant_name = <None>
+
+# The default service_type for endpoint URL discovery. (string value)
+#service_type = placement
+
+# The default service_name for endpoint URL discovery. (string value)
+#service_name = <None>
+
+# List of interfaces, in order of preference, for endpoint URL. (list
+# value)
+# Deprecated group/name - [placement]/os_interface
+valid_interfaces = internal
+
+# The default region_name for endpoint URL discovery. (string value)
+# Deprecated group/name - [placement]/os_region_name
+#region_name = <None>
+
+# Always use this endpoint URL for requests for this client. NOTE: The
+# unversioned endpoint should be specified here; to request a
+# particular API version, use the `version`, `min-version`, and/or
+# `max-version` options. (string value)
+#endpoint_override = <None>
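+
+# The [placement] credentials above are rendered from compute.identity.
+# A sketch of the keys this template reads (values are illustrative
+# only):
+#
+#   nova:
+#     compute:
+#       identity:
+#         user: nova
+#         password: secret
+#         tenant: service
+#         host: 10.0.0.10
+#         protocol: http
+#         domain: default
+#         region: RegionOne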
+
+
+[quota]
+#
+# Quota options allow you to manage quotas in an OpenStack deployment.
+
+#
+# From nova.conf
+#
+
+#
+# The number of instances allowed per project.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_instances
+#instances = 10
+
+#
+# The number of instance cores or vCPUs allowed per project.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_cores
+#cores = 20
+
+#
+# The number of megabytes of instance RAM allowed per project.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_ram
+#ram = 51200
+
+# DEPRECATED:
+# The number of floating IPs allowed per project.
+#
+# Floating IPs are not allocated to instances by default. Users need to
+# select them from the pool configured by the OpenStack administrator
+# to attach to their instances.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_floating_ips
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#floating_ips = 10
+
+# DEPRECATED:
+# The number of fixed IPs allowed per project.
+#
+# Unlike floating IPs, fixed IPs are allocated dynamically by the
+# network component when instances boot up. This quota value should be
+# at least the number of instances allowed.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_fixed_ips
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#fixed_ips = -1
+
+#
+# The number of metadata items allowed per instance.
+#
+# Users can associate metadata with an instance during instance
+# creation. This
+# metadata takes the form of key-value pairs.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_metadata_items
+#metadata_items = 128
+
+#
+# The number of injected files allowed.
+#
+# File injection allows users to customize the personality of an
+# instance by injecting data into it upon boot. Only text file
+# injection is permitted: binary or ZIP files are not accepted. During
+# file injection, any existing files that match specified files are
+# renamed to include a ``.bak`` extension appended with a timestamp.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_injected_files
+#injected_files = 5
+
+#
+# The number of bytes allowed per injected file.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_injected_file_content_bytes
+#injected_file_content_bytes = 10240
+
+#
+# The maximum allowed injected file path length.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_injected_file_path_length
+#injected_file_path_length = 255
+
+# DEPRECATED:
+# The number of security groups per project.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_security_groups
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#security_groups = 10
+
+# DEPRECATED:
+# The number of security rules per security group.
+#
+# The associated rules in each security group control the traffic to
+# instances in
+# the group.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_security_group_rules
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#security_group_rules = 20
+
+#
+# The maximum number of key pairs allowed per user.
+#
+# Users can create at least one key pair for each project and use the
+# key pair
+# for multiple instances that belong to that project.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_key_pairs
+#key_pairs = 100
+
+#
+# The maximum number of server groups per project.
+#
+# Server groups are used to control the affinity and anti-affinity
+# scheduling policy for a group of servers or instances. Reducing the
+# quota will not affect any existing group, but new servers will not be
+# allowed into groups that have become over quota.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_server_groups
+#server_groups = 10
+
+#
+# The maximum number of servers per server group.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_server_group_members
+#server_group_members = 10
+
+#
+# The number of seconds until a reservation expires.
+#
+# This quota represents the time period for invalidating quota
+# reservations.
+# (integer value)
+#reservation_expire = 86400
+
+#
+# The count of reservations until usage is refreshed.
+#
+# This defaults to 0 (off) to avoid additional load but it is useful to
+# turn on to help keep quota usage up-to-date and reduce the impact of
+# out of sync usage issues.
+# (integer value)
+# Minimum value: 0
+#until_refresh = 0
+
+#
+# The number of seconds between subsequent usage refreshes.
+#
+# This defaults to 0 (off) to avoid additional load but it is useful to
+# turn on to help keep quota usage up-to-date and reduce the impact of
+# out of sync usage issues. Note that quotas are not updated on a
+# periodic task; they will update on a new reservation if max_age has
+# passed since the last reservation.
+# (integer value)
+# Minimum value: 0
+#max_age = 0
+
+# DEPRECATED:
+# The quota enforcer driver.
+#
+# Provides abstraction for quota checks. Users can configure a specific
+# driver to use for quota checks.
+#
+# Possible values:
+#
+# * nova.quota.DbQuotaDriver (default) or any string representing a
+#   fully qualified class name.
+# (string value)
+# Deprecated group/name - [DEFAULT]/quota_driver
+# This option is deprecated for removal since 14.0.0.
+# Its value may be silently ignored in the future.
+#driver = nova.quota.DbQuotaDriver
+
+#
+# Recheck quota after resource creation to prevent allowing quota to be
+# exceeded.
+#
+# This defaults to True (recheck quota after resource creation) but can
+# be set to False to avoid additional load if allowing quota to be
+# exceeded because of racing requests is considered acceptable. For
+# example, when set to False, if a user makes highly parallel REST API
+# requests to create servers, it will be possible for them to create
+# more servers than their allowed quota during the race. If their quota
+# is 10 servers, they might be able to create 50 during the burst.
+# After the burst, they will not be able to create any more servers but
+# they will be able to keep their 50 servers until they delete them.
+#
+# The initial quota check is done before resources are created, so if
+# multiple parallel requests arrive at the same time, all could pass
+# the quota check and create resources, potentially exceeding quota.
+# When recheck_quota is True, quota will be checked a second time after
+# resources have been created and if the resource is over quota, it
+# will be deleted and OverQuota will be raised, usually resulting in a
+# 403 response to the REST API user. This makes it impossible for a
+# user to exceed their quota with the caveat that it will, however, be
+# possible for a REST API user to be rejected with a 403 response in
+# the event of a collision close to reaching their quota limit, even if
+# the user has enough quota available when they made the request.
+# (boolean value)
+#recheck_quota = true
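+#
+# For example, a deployment that tolerates the race described above in
+# exchange for less load could set (illustrative value only):
+#
+#   [quota]
+#   recheck_quota = false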
+
+
+[rdp]
+#
+# Options under this group enable and configure Remote Desktop
+# Protocol (RDP) related features.
+#
+# This group is only relevant to Hyper-V users.
+
+#
+# From nova.conf
+#
+
+#
+# Enable Remote Desktop Protocol (RDP) related features.
+#
+# Hyper-V, unlike the majority of the hypervisors employed on Nova
+# compute
+# nodes, uses RDP instead of VNC and SPICE as a desktop sharing
+# protocol to
+# provide instance console access. This option enables RDP for
+# graphical
+# console access for virtual machines created by Hyper-V.
+#
+# **Note:** RDP should only be enabled on compute nodes that support
+# the Hyper-V
+# virtualization platform.
+#
+# Related options:
+#
+# * ``compute_driver``: Must be hyperv.
+#
+# (boolean value)
+#enabled = false
+
+#
+# The URL an end user would use to connect to the RDP HTML5 console
+# proxy.
+# The console proxy service is called with this token-embedded URL and
+# establishes the connection to the proper instance.
+#
+# An RDP HTML5 console proxy service will need to be configured to
+# listen on the
+# address configured here. Typically the console proxy service would
+# be run on a
+# controller node. The localhost address used as default would only
+# work in a
+# single node environment i.e. devstack.
+#
+# An RDP HTML5 proxy allows a user to access via the web the text or
+# graphical
+# console of any Windows server or workstation using RDP. RDP HTML5
+# console
+# proxy services include FreeRDP, wsgate.
+# See https://github.com/FreeRDP/FreeRDP-WebConnect
+#
+# Possible values:
+#
+# * <scheme>://<ip-address>:<port-number>/
+#
+# The scheme must be identical to the scheme configured for the RDP
+# HTML5
+# console proxy service. It is ``http`` or ``https``.
+#
+# The IP address must be identical to the address on which the RDP
+# HTML5
+# console proxy service is listening.
+#
+# The port must be identical to the port on which the RDP HTML5
+# console proxy
+# service is listening.
+#
+# Related options:
+#
+# * ``rdp.enabled``: Must be set to ``True`` for
+# ``html5_proxy_base_url`` to be
+# effective.
+# (uri value)
+#html5_proxy_base_url = http://127.0.0.1:6083/
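+#
+# A minimal sketch for a Hyper-V node (the proxy host name is a
+# placeholder, not a value from this deployment):
+#
+#   [rdp]
+#   enabled = true
+#   html5_proxy_base_url = https://rdp-proxy.example.net:6083/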
+
+
+[remote_debug]
+
+#
+# From nova.conf
+#
+
+#
+# Debug host (IP or name) to connect to. This command line parameter
+# is used when
+# you want to connect to a nova service via a debugger running on a
+# different
+# host.
+#
+# Note that using the remote debug option changes how Nova uses the
+# eventlet
+# library to support async IO. This could result in failures that do
+# not occur
+# under normal operation. Use at your own risk.
+#
+# Possible Values:
+#
+# * IP address of a remote host as a command line parameter
+# to a nova service. For Example:
+#
+# /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf
+# --remote_debug-host <IP address where the debugger is running>
+# (unknown value)
+#host = <None>
+
+#
+# Debug port to connect to. This command line parameter allows you to
+# specify
+# the port you want to use to connect to a nova service via a debugger
+# running
+# on a different host.
+#
+# Note that using the remote debug option changes how Nova uses the
+# eventlet
+# library to support async IO. This could result in failures that do
+# not occur
+# under normal operation. Use at your own risk.
+#
+# Possible Values:
+#
+# * Port number you want to use as a command line parameter
+# to a nova service. For Example:
+#
+# /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf
+# --remote_debug-host <IP address where the debugger is running>
+#     --remote_debug-port <port it's listening on>.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#port = <None>
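+#
+# A complete invocation combining both options above (the address and
+# port are placeholders):
+#
+#   /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf \
+#       --remote_debug-host 192.168.0.10 --remote_debug-port 5678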
+
+
+[scheduler]
+
+#
+# From nova.conf
+#
+
+#
+# The scheduler host manager to use.
+#
+# The host manager manages the in-memory picture of the hosts that the
+# scheduler
+# uses. The options values are chosen from the entry points under the
+# namespace
+# 'nova.scheduler.host_manager' in 'setup.cfg'.
+#
+# NOTE: The "ironic_host_manager" option is deprecated as of the
+# 17.0.0 Queens
+# release.
+# (string value)
+# Possible values:
+# host_manager - <No description provided>
+# ironic_host_manager - <No description provided>
+# Deprecated group/name - [DEFAULT]/scheduler_host_manager
+#host_manager = host_manager
+
+#
+# The class of the driver used by the scheduler. This should be chosen
+# from one
+# of the entrypoints under the namespace 'nova.scheduler.driver' of
+# file
+# 'setup.cfg'. If nothing is specified in this option, the
+# 'filter_scheduler' is
+# used.
+#
+# Other options are:
+#
+# * 'caching_scheduler' which aggressively caches the system state for
+# better
+# individual scheduler performance at the risk of more retries when
+# running
+# multiple schedulers. [DEPRECATED]
+# * 'chance_scheduler' which simply picks a host at random.
+# [DEPRECATED]
+# * 'fake_scheduler' which is used for testing.
+#
+# Possible values:
+#
+# * Any of the drivers included in Nova:
+# ** filter_scheduler
+# ** caching_scheduler
+# ** chance_scheduler
+# ** fake_scheduler
+# * You may also set this to the entry point name of a custom
+# scheduler driver,
+# but you will be responsible for creating and maintaining it in
+# your setup.cfg
+# file.
+# (string value)
+# Deprecated group/name - [DEFAULT]/scheduler_driver
+#driver = filter_scheduler
+
+#
+# Periodic task interval.
+#
+# This value controls how often (in seconds) to run periodic tasks in
+# the
+# scheduler. The specific tasks that are run for each period are
+# determined by
+# the particular scheduler being used.
+#
+# If this is larger than the nova-service 'service_down_time' setting,
+# Nova may
+# report the scheduler service as down. This is because the scheduler
+# driver is
+# responsible for sending a heartbeat and it will only do that as
+# often as this
+# option allows. As each scheduler can work a little differently than
+# the others,
+# be sure to test this with your selected scheduler.
+#
+# Possible values:
+#
+# * An integer, where the integer corresponds to periodic task
+# interval in
+# seconds. 0 uses the default interval (60 seconds). A negative
+# value disables
+# periodic tasks.
+#
+# Related options:
+#
+# * ``nova-service service_down_time``
+# (integer value)
+# Deprecated group/name - [DEFAULT]/scheduler_driver_task_period
+#periodic_task_interval = 60
+
+#
+# This is the maximum number of attempts that will be made for a given
+# instance
+# build/move operation. It limits the number of alternate hosts
+# returned by the
+# scheduler. When that list of hosts is exhausted, a
+# MaxRetriesExceeded
+# exception is raised and the instance is set to an error state.
+#
+# Possible values:
+#
+# * A positive integer, where the integer corresponds to the max
+# number of
+# attempts that can be made when building or moving an instance.
+# (integer value)
+# Minimum value: 1
+# Deprecated group/name - [DEFAULT]/scheduler_max_attempts
+#max_attempts = 3
+
+#
+# Periodic task interval.
+#
+# This value controls how often (in seconds) the scheduler should
+# attempt
+# to discover new hosts that have been added to cells. If negative
+# (the
+# default), no automatic discovery will occur.
+#
+# Deployments where compute nodes come and go frequently may want this
+# enabled, where others may prefer to manually discover hosts when one
+# is added to avoid any overhead from constantly checking. If enabled,
+# every time this runs, we will select any unmapped hosts out of each
+# cell database on every run.
+# (integer value)
+# Minimum value: -1
+#discover_hosts_in_cells_interval = -1
+
+#
+# This setting determines the maximum limit on results received from
+# the
+# placement service during a scheduling operation. It effectively
+# limits
+# the number of hosts that may be considered for scheduling requests
+# that
+# match a large number of candidates.
+#
+# A value of 1 (the minimum) will effectively defer scheduling to the
+# placement
+# service strictly on "will it fit" grounds. A higher value will put
+# an upper
+# cap on the number of results the scheduler will consider during the
+# filtering
+# and weighing process. Large deployments may need to set this lower
+# than the
+# total number of hosts available to limit memory consumption, network
+# traffic,
+# etc. of the scheduler.
+#
+# This option is only used by the FilterScheduler; if you use a
+# different
+# scheduler, this option has no effect.
+# (integer value)
+# Minimum value: 1
+#max_placement_results = 1000
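+#
+# An illustrative [scheduler] snippet tying the options above together
+# (values are examples, not recommendations):
+#
+#   [scheduler]
+#   driver = filter_scheduler
+#   periodic_task_interval = 60
+#   max_attempts = 5
+#   discover_hosts_in_cells_interval = 300
+#   max_placement_results = 1000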
+
+
+[serial_console]
+#
+# The serial console feature allows you to connect to a guest in case
+# a
+# graphical console like VNC, RDP or SPICE is not available. This is
+# only
+# currently supported for the libvirt, Ironic and Hyper-V drivers.
+
+#
+# From nova.conf
+#
+
+#
+# Enable the serial console feature.
+#
+# In order to use this feature, the service ``nova-serialproxy`` needs
+# to run.
+# This service is typically executed on the controller node.
+# (boolean value)
+#enabled = false
+
+#
+# A range of TCP ports a guest can use for its backend.
+#
+# Each instance which gets created will use one port out of this
+# range. If the
+# range is not big enough to provide another port for a new instance,
+# this
+# instance won't get launched.
+#
+# Possible values:
+#
+# * Each string which passes the regex ``\d+:\d+``, for example
+#   ``10000:20000``.
+# Be sure that the first port number is lower than the second port
+# number
+# and that both are in range from 0 to 65535.
+# (string value)
+#port_range = 10000:20000
+
+#
+# The URL an end user would use to connect to the ``nova-serialproxy``
+# service.
+#
+# The ``nova-serialproxy`` service is called with this token enriched
+# URL
+# and establishes the connection to the proper instance.
+#
+# Related options:
+#
+# * The IP address must be identical to the address to which the
+# ``nova-serialproxy`` service is listening (see option
+# ``serialproxy_host``
+# in this section).
+# * The port must be the same as in the option ``serialproxy_port`` of
+# this
+# section.
+# * If you choose to use a secured websocket connection, then start
+# this option
+# with ``wss://`` instead of the unsecured ``ws://``. The options
+# ``cert``
+# and ``key`` in the ``[DEFAULT]`` section have to be set for that.
+# (uri value)
+#base_url = ws://127.0.0.1:6083/
+
+#
+# The IP address to which proxy clients (like ``nova-serialproxy``)
+# should
+# connect to get the serial console of an instance.
+#
+# This is typically the IP address of the host of a ``nova-compute``
+# service.
+# (string value)
+#proxyclient_address = 127.0.0.1
+
+#
+# The IP address which is used by the ``nova-serialproxy`` service to
+# listen
+# for incoming requests.
+#
+# The ``nova-serialproxy`` service listens on this IP address for
+# incoming
+# connection requests to instances which expose serial console.
+#
+# Related options:
+#
+# * Ensure that this is the same IP address which is defined in the
+# option
+# ``base_url`` of this section or use ``0.0.0.0`` to listen on all
+# addresses.
+# (string value)
+#serialproxy_host = 0.0.0.0
+
+#
+# The port number which is used by the ``nova-serialproxy`` service to
+# listen
+# for incoming requests.
+#
+# The ``nova-serialproxy`` service listens on this port number for
+# incoming
+# connection requests to instances which expose serial console.
+#
+# Related options:
+#
+# * Ensure that this is the same port number which is defined in the
+# option
+# ``base_url`` of this section.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#serialproxy_port = 6083
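+#
+# A consistent example for this section; note that the port in
+# ``base_url`` matches ``serialproxy_port``, and the host name is a
+# placeholder:
+#
+#   [serial_console]
+#   enabled = true
+#   port_range = 10000:20000
+#   base_url = ws://ctl01.example.net:6083/
+#   serialproxy_host = 0.0.0.0
+#   serialproxy_port = 6083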
+
+
+[service_user]
+#
+# Configuration options for service to service authentication using a
+# service
+# token. These options allow sending a service token along with the
+# user's token
+# when contacting external REST APIs.
+
+#
+# From nova.conf
+#
+
+#
+# When True, if sending a user token to a REST API, also send a
+# service token.
+#
+# Nova often reuses the user token provided to the nova-api to talk to
+# other REST
+# APIs, such as Cinder, Glance and Neutron. It is possible that while
+# the user
+# token was valid when the request was made to Nova, the token may
+# expire before
+# it reaches the other service. To avoid any failures, and to make it
+# clear it is
+# Nova calling the service on the user's behalf, we include a service
+# token along
+# with the user token. Should the user's token have expired, a valid
+# service
+# token ensures the REST API request will still be accepted by the
+# keystone
+# middleware.
+# (boolean value)
+#send_service_user_token = false
+
+# PEM encoded Certificate Authority to use when verifying HTTPs
+# connections. (string value)
+#cafile = <None>
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Timeout value for http requests (integer value)
+#timeout = <None>
+
+# Authentication type to load (string value)
+# Deprecated group/name - [service_user]/auth_plugin
+#auth_type = <None>
+
+# Config Section from which to load plugin specific options (string
+# value)
+#auth_section = <None>
+
+# Authentication URL (string value)
+#auth_url = <None>
+
+# Scope for system operations (string value)
+#system_scope = <None>
+
+# Domain ID to scope to (string value)
+#domain_id = <None>
+
+# Domain name to scope to (string value)
+#domain_name = <None>
+
+# Project ID to scope to (string value)
+#project_id = <None>
+
+# Project name to scope to (string value)
+#project_name = <None>
+
+# Domain ID containing project (string value)
+#project_domain_id = <None>
+
+# Domain name containing project (string value)
+#project_domain_name = <None>
+
+# Trust ID (string value)
+#trust_id = <None>
+
+# Optional domain ID to use with v3 and v2 parameters. It will be used
+# for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_id = <None>
+
+# Optional domain name to use with v3 API and v2 parameters. It will
+# be used for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_name = <None>
+
+# User ID (string value)
+#user_id = <None>
+
+# Username (string value)
+# Deprecated group/name - [service_user]/user_name
+#username = <None>
+
+# User's domain id (string value)
+#user_domain_id = <None>
+
+# User's domain name (string value)
+#user_domain_name = <None>
+
+# User's password (string value)
+#password = <None>
+
+# Tenant ID (string value)
+#tenant_id = <None>
+
+# Tenant Name (string value)
+#tenant_name = <None>
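+
+#
+# A typical sketch enabling the service token with the keystone
+# password plugin (the URL and credentials are placeholders):
+#
+#   [service_user]
+#   send_service_user_token = true
+#   auth_type = password
+#   auth_url = http://keystone.example.net:5000/v3
+#   username = nova
+#   password = secret
+#   user_domain_name = Default
+#   project_name = service
+#   project_domain_name = Default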
+
+
+[spice]
+#
+# SPICE console feature allows you to connect to a guest virtual
+# machine.
+# SPICE is a replacement for the fairly limited VNC protocol.
+#
+# Following requirements must be met in order to use SPICE:
+#
+# * Virtualization driver must be libvirt
+# * spice.enabled set to True
+# * vnc.enabled set to False
+# * update html5proxy_base_url
+# * update server_proxyclient_address
+
+#
+# From nova.conf
+#
+
+#
+# Enable SPICE related features.
+#
+# Related options:
+#
+# * VNC must be explicitly disabled to get access to the SPICE
+# console. Set the
+# enabled option to False in the [vnc] section to disable the VNC
+# console.
+# (boolean value)
+#enabled = false
+enabled = false
+
+#
+# Enable the SPICE guest agent support on the instances.
+#
+# The Spice agent works with the Spice protocol to offer a better
+# guest console
+# experience. However, the Spice console can still be used without the
+# Spice
+# Agent. With the Spice agent installed the following features are
+# enabled:
+#
+# * Copy & Paste of text and images between the guest and client
+# machine
+# * Automatic adjustment of resolution when the client screen changes
+# - e.g.
+# if you make the Spice console full screen the guest resolution
+# will adjust to
+# match it rather than letterboxing.
+# * Better mouse integration - The mouse can be captured and released
+# without
+# needing to click inside the console or press keys to release it.
+# The
+# performance of mouse movement is also improved.
+# (boolean value)
+#agent_enabled = true
+
+#
+# Location of the SPICE HTML5 console proxy.
+#
+# End users would use this URL to connect to the
+# ``nova-spicehtml5proxy`` service. This service forwards the request
+# to the console of an instance.
+#
+# In order to use SPICE console, the service ``nova-spicehtml5proxy``
+# should be
+# running. This service is typically launched on the controller node.
+#
+# Possible values:
+#
+# * Must be a valid URL of the form:
+# ``http://host:port/spice_auto.html``
+# where host is the node running ``nova-spicehtml5proxy`` and the
+# port is
+#   typically 6082. Consider not using the default value as it is not
+#   well defined for any real deployment.
+#
+# Related options:
+#
+# * This option depends on ``html5proxy_host`` and ``html5proxy_port``
+# options.
+# The access URL returned by the compute node must have the host
+# and port where the ``nova-spicehtml5proxy`` service is listening.
+# (uri value)
+#html5proxy_base_url = http://127.0.0.1:6082/spice_auto.html
+{%- if compute.vncproxy_url is defined %}
+html5proxy_base_url = {{ compute.vncproxy_url }}/spice_auto.html
+{%- endif %}
+
+#
+# The address where the SPICE server running on the instances should
+# listen.
+#
+# Typically, the ``nova-spicehtml5proxy`` proxy client runs on the
+# controller
+# node and connects over the private network to this address on the
+# compute
+# node(s).
+#
+# Possible values:
+#
+# * IP address to listen on.
+# (string value)
+#server_listen = 127.0.0.1
+
+#
+# The address used by ``nova-spicehtml5proxy`` client to connect to
+# instance
+# console.
+#
+# Typically, the ``nova-spicehtml5proxy`` proxy client runs on the
+# controller node and connects over the private network to this
+# address on the
+# compute node(s).
+#
+# Possible values:
+#
+# * Any valid IP address on the compute node.
+#
+# Related options:
+#
+# * This option depends on the ``server_listen`` option.
+# The proxy client must be able to access the address specified in
+# ``server_listen`` using the value of this option.
+# (string value)
+#server_proxyclient_address = 127.0.0.1
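+#
+# An illustrative pairing of the two addresses above, with VNC
+# disabled as required (the host name and addresses are placeholders):
+#
+#   [vnc]
+#   enabled = false
+#
+#   [spice]
+#   enabled = true
+#   html5proxy_base_url = http://ctl01.example.net:6082/spice_auto.html
+#   server_listen = 0.0.0.0
+#   server_proxyclient_address = 10.0.0.12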
+
+#
+# A keyboard layout which is supported by the underlying hypervisor on
+# this
+# node.
+#
+# Possible values:
+# * This is usually an 'IETF language tag' (default is 'en-us'). If
+# you
+# use QEMU as hypervisor, you should find the list of supported
+# keyboard
+# layouts at /usr/share/qemu/keymaps.
+# (string value)
+#keymap = en-us
+
+#
+# IP address or a hostname on which the ``nova-spicehtml5proxy``
+# service
+# listens for incoming requests.
+#
+# Related options:
+#
+# * This option depends on the ``html5proxy_base_url`` option.
+# The ``nova-spicehtml5proxy`` service must be listening on a host
+# that is
+# accessible from the HTML5 client.
+# (unknown value)
+#html5proxy_host = 0.0.0.0
+
+#
+# Port on which the ``nova-spicehtml5proxy`` service listens for
+# incoming
+# requests.
+#
+# Related options:
+#
+# * This option depends on the ``html5proxy_base_url`` option.
+# The ``nova-spicehtml5proxy`` service must be listening on a port
+# that is
+# accessible from the HTML5 client.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#html5proxy_port = 6082
+
+
+[upgrade_levels]
+
+{%- if compute.upgrade_levels is defined %}
+{%- for key, value in compute.upgrade_levels.iteritems() %}
+{{ key }}={{ value }}
+{%- endfor %}
+{%- endif %}
+#
+# upgrade_levels options are used to set version cap for RPC
+# messages sent between different nova services.
+#
+# By default all services send messages using the latest version
+# they know about.
+#
+# The compute upgrade level is an important part of rolling upgrades
+# where old and new nova-compute services run side by side.
+#
+# The other options can largely be ignored, and are only kept to
+# help with a possible future backport issue.
+
+#
+# From nova.conf
+#
+
+#
+# Compute RPC API version cap.
+#
+# By default, we always send messages using the most recent version
+# the client knows about.
+#
+# Where you have old and new compute services running, you should set
+# this to the lowest deployed version. This is to guarantee that all
+# services never send messages that one of the compute nodes can't
+# understand. Note that we only support upgrading from release N to
+# release N+1.
+#
+# Set this option to "auto" if you want to let the compute RPC module
+# automatically determine what version to use based on the service
+# versions in the deployment.
+#
+# Possible values:
+#
+# * By default send the latest version the client knows about
+# * 'auto': Automatically determines what version to use based on
+# the service versions in the deployment.
+# * A string representing a version number in the format 'N.N';
+# for example, possible values might be '1.12' or '2.0'.
+# * An OpenStack release name, in lower case, such as 'mitaka' or
+# 'liberty'.
+# (string value)
+#compute = <None>
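+#
+# During a rolling upgrade, a minimal sketch is:
+#
+#   [upgrade_levels]
+#   compute = auto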
+
+# Cells RPC API version cap (string value)
+#cells = <None>
+
+# Intercell RPC API version cap (string value)
+#intercell = <None>
+
+# Cert RPC API version cap (string value)
+#cert = <None>
+
+# Scheduler RPC API version cap (string value)
+#scheduler = <None>
+
+# Conductor RPC API version cap (string value)
+#conductor = <None>
+
+# Console RPC API version cap (string value)
+#console = <None>
+
+# Consoleauth RPC API version cap (string value)
+#consoleauth = <None>
+
+# Network RPC API version cap (string value)
+#network = <None>
+
+# Base API RPC API version cap (string value)
+#baseapi = <None>
+
+
+[vault]
+
+#
+# From nova.conf
+#
+
+# root token for vault (string value)
+#root_token_id = <None>
+
+# Use this endpoint to connect to Vault, for example:
+# "http://127.0.0.1:8200" (string value)
+#vault_url = http://127.0.0.1:8200
+
+# Absolute path to ca cert file (string value)
+#ssl_ca_crt_file = <None>
+
+# SSL Enabled/Disabled (boolean value)
+#use_ssl = false
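+#
+# An illustrative TLS-enabled setup (the URL and CA path are
+# placeholders):
+#
+#   [vault]
+#   vault_url = https://vault.example.net:8200
+#   use_ssl = true
+#   ssl_ca_crt_file = /etc/ssl/certs/vault-ca.pem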
+
+
+[vendordata_dynamic_auth]
+#
+# Options within this group control the authentication of the
+# vendordata
+# subsystem of the metadata API server (and config drive) with
+# external systems.
+
+#
+# From nova.conf
+#
+
+# PEM encoded Certificate Authority to use when verifying HTTPs
+# connections. (string value)
+#cafile = <None>
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Timeout value for http requests (integer value)
+#timeout = <None>
+
+# Authentication type to load (string value)
+# Deprecated group/name - [vendordata_dynamic_auth]/auth_plugin
+#auth_type = <None>
+
+# Config Section from which to load plugin specific options (string
+# value)
+#auth_section = <None>
+
+# Authentication URL (string value)
+#auth_url = <None>
+
+# Scope for system operations (string value)
+#system_scope = <None>
+
+# Domain ID to scope to (string value)
+#domain_id = <None>
+
+# Domain name to scope to (string value)
+#domain_name = <None>
+
+# Project ID to scope to (string value)
+#project_id = <None>
+
+# Project name to scope to (string value)
+#project_name = <None>
+
+# Domain ID containing project (string value)
+#project_domain_id = <None>
+
+# Domain name containing project (string value)
+#project_domain_name = <None>
+
+# Trust ID (string value)
+#trust_id = <None>
+
+# Optional domain ID to use with v3 and v2 parameters. It will be used
+# for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_id = <None>
+
+# Optional domain name to use with v3 API and v2 parameters. It will
+# be used for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_name = <None>
+
+# User ID (string value)
+#user_id = <None>
+
+# Username (string value)
+# Deprecated group/name - [vendordata_dynamic_auth]/user_name
+#username = <None>
+
+# User's domain id (string value)
+#user_domain_id = <None>
+
+# User's domain name (string value)
+#user_domain_name = <None>
+
+# User's password (string value)
+#password = <None>
+
+# Tenant ID (string value)
+#tenant_id = <None>
+
+# Tenant Name (string value)
+#tenant_name = <None>
+
+
+[vmware]
+#
+# Related options:
+# Following options must be set in order to launch VMware-based
+# virtual machines.
+#
+# * compute_driver: Must use vmwareapi.VMwareVCDriver.
+# * vmware.host_username
+# * vmware.host_password
+# * vmware.cluster_name
+
+#
+# From nova.conf
+#
+
+#
+# This option specifies the physical ethernet adapter name for VLAN
+# networking.
+#
+# Set the vlan_interface configuration option to match the ESX host
+# interface that handles VLAN-tagged VM traffic.
+#
+# Possible values:
+#
+# * Any valid string representing VLAN interface name
+# (string value)
+#vlan_interface = vmnic0
+
+#
+# This option should be configured only when using the NSX-MH Neutron
+# plugin. This is the name of the integration bridge on the ESXi
+# server
+# or host. This should not be set for any other Neutron plugin. Hence
+# the default value is not set.
+#
+# Possible values:
+#
+# * Any valid string representing the name of the integration bridge
+# (string value)
+#integration_bridge = <None>
+
+#
+# Set this value if affected by an increased network latency causing
+# repeated characters when typing in a remote console.
+# (integer value)
+# Minimum value: 0
+#console_delay_seconds = <None>
+
+#
+# Identifies the remote system where the serial port traffic will
+# be sent.
+#
+# This option adds a virtual serial port which sends console output to
+# a configurable service URI. At the service URI address there will be
+# a virtual serial port concentrator that will collect console logs.
+# If this is not set, no serial ports will be added to the created
+# VMs.
+#
+# Possible values:
+#
+# * Any valid URI
+# (string value)
+#serial_port_service_uri = <None>
+
+#
+# Identifies a proxy service that provides network access to the
+# serial_port_service_uri.
+#
+# Possible values:
+#
+# * Any valid URI (The scheme is 'telnet' or 'telnets'.)
+#
+# Related options:
+# This option is ignored if serial_port_service_uri is not specified.
+# * serial_port_service_uri
+# (uri value)
+#serial_port_proxy_uri = <None>
+
+#
+# Specifies the directory where the Virtual Serial Port Concentrator
+# is
+# storing console log files. It should match the 'serial_log_dir'
+# config
+# value of VSPC.
+# (string value)
+#serial_log_dir = /opt/vmware/vspc
+
+#
+# Hostname or IP address for connection to VMware vCenter host.
+# (unknown value)
+#host_ip = <None>
+
+# Port for connection to VMware vCenter host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#host_port = 443
+
+# Username for connection to VMware vCenter host. (string value)
+#host_username = <None>
+
+# Password for connection to VMware vCenter host. (string value)
+#host_password = <None>
+
+#
+# Specifies the CA bundle file to be used in verifying the vCenter
+# server certificate.
+# (string value)
+#ca_file = <None>
+
+#
+# If true, the vCenter server certificate is not verified. If false,
+# then the default CA truststore is used for verification.
+#
+# Related options:
+# * ca_file: This option is ignored if "ca_file" is set.
+# (boolean value)
+#insecure = false
+
+# Name of a VMware Cluster ComputeResource. (string value)
+#cluster_name = <None>
+
+#
+# Regular expression pattern to match the name of datastore.
+#
+# The datastore_regex setting specifies the datastores to use with
+# Compute. For example, datastore_regex="nas.*" selects all the data
+# stores that have a name starting with "nas".
+#
+# NOTE: If no regex is given, it just picks the datastore with the
+# most free space.
+#
+# Possible values:
+#
+# * Any matching regular expression to a datastore must be given
+# (string value)
+#datastore_regex = <None>
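+#
+# A minimal vCenter connection sketch using the options above (the
+# host, credentials and cluster name are placeholders):
+#
+#   [vmware]
+#   host_ip = vcenter.example.net
+#   host_username = administrator@vsphere.local
+#   host_password = secret
+#   cluster_name = Cluster01
+#   datastore_regex = nas.*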
+
+#
+# Time interval in seconds to poll remote tasks invoked on
+# VMware VC server.
+# (floating point value)
+#task_poll_interval = 0.5
+
+#
+# Number of times VMware vCenter server API must be retried on
+# connection
+# failures, e.g. socket error, etc.
+# (integer value)
+# Minimum value: 0
+#api_retry_count = 10
+
+#
+# This option specifies VNC starting port.
+#
+# Every VM created on an ESX host can enable a VNC client for remote
+# connections. The 'vnc_port' option sets the default starting port
+# for those VNC clients.
+#
+# Possible values:
+#
+# * Any valid port number within 5900 - (5900 + vnc_port_total)
+#
+# Related options:
+# Below options should be set to enable VNC client.
+# * vnc.enabled = True
+# * vnc_port_total
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#vnc_port = 5900
+
+#
+# Total number of VNC ports.
+# (integer value)
+# Minimum value: 0
+#vnc_port_total = 10000
+
+#
+# This option enables/disables the use of linked clone.
+#
+# The ESX hypervisor requires a copy of the VMDK file in order to boot
+# up a virtual machine. The compute driver must download the VMDK via
+# HTTP from the OpenStack Image service to a datastore that is visible
+# to the hypervisor and cache it. Subsequent virtual machines that
+# need
+# the VMDK use the cached version and don't have to copy the file
+# again
+# from the OpenStack Image service.
+#
+# If set to false, even with a cached VMDK, there is still a copy
+# operation from the cache location to the hypervisor file directory
+# in the shared datastore. If set to true, the above copy operation
+# is avoided as it creates a copy of the virtual machine that shares
+# virtual disks with its parent VM.
+# (boolean value)
+#use_linked_clone = true
+
+#
+# This option sets the http connection pool size.
+#
+# The connection pool size is the maximum number of connections from
+# nova to
+# vSphere. It should only be increased if there are warnings
+# indicating that
+# the connection pool is full, otherwise, the default should suffice.
+# (integer value)
+# Minimum value: 10
+#connection_pool_size = 10
+
+#
+# This option enables or disables storage policy based placement
+# of instances.
+#
+# Related options:
+#
+# * pbm_default_policy
+# (boolean value)
+#pbm_enabled = false
+
+#
+# This option specifies the PBM service WSDL file location URL.
+#
+# Setting this will disable storage policy based placement
+# of instances.
+#
+# Possible values:
+#
+# * Any valid file path
+# e.g file:///opt/SDK/spbm/wsdl/pbmService.wsdl
+# (string value)
+#pbm_wsdl_location = <None>
+
+#
+# This option specifies the default policy to be used.
+#
+# If pbm_enabled is set and there is no defined storage policy for the
+# specific request, then this policy will be used.
+#
+# Possible values:
+#
+# * Any valid storage policy such as VSAN default storage policy
+#
+# Related options:
+#
+# * pbm_enabled
+# (string value)
+#pbm_default_policy = <None>
+
+#
+# This option specifies the limit on the maximum number of objects to
+# return in a single result.
+#
+# A positive value will cause the operation to suspend the retrieval
+# when the count of objects reaches the specified limit. The server
+# may
+# still limit the count to something less than the configured value.
+# Any remaining objects may be retrieved with additional requests.
+# (integer value)
+# Minimum value: 0
+#maximum_objects = 100
+
+#
+# This option adds a prefix to the folder where cached images are
+# stored.
+#
+# This is not the full path - just a folder prefix. This should only
+# be
+# used when a datastore cache is shared between compute nodes.
+#
+# Note: This should only be used when the compute nodes are running on
+# the same host or they have a shared file system.
+#
+# Possible values:
+#
+# * Any string representing the cache prefix to the folder
+# (string value)
+#cache_prefix = <None>
+
+
+[vnc]
+#
+# Virtual Network Computer (VNC) can be used to provide remote desktop
+# console access to instances for tenants and/or administrators.
+
+#
+# From nova.conf
+#
+
+#
+# Enable VNC related features.
+#
+# Guests will get created with graphical devices to support this.
+# Clients
+# (for example Horizon) can then establish a VNC connection to the
+# guest.
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/vnc_enabled
+enabled = true
+
+{%- if compute.vncproxy_url is defined %}
+novncproxy_base_url={{ compute.vncproxy_url }}/vnc_auto.html
+{%- endif %}
+{%- if compute.bind is defined and compute.bind.vnc_port is defined %}
+novncproxy_port={{ compute.bind.vnc_port }}
+{%- endif %}
+{%- if compute.bind is defined %}
+{%- if compute.bind.vnc_address is defined %}
+vncserver_listen={{ compute.bind.vnc_address }}
+vncserver_proxyclient_address={{ compute.bind.vnc_address }}
+{%- else %}
+vncserver_listen=0.0.0.0
+{%- endif %}
+{%- endif %}
+
+#
+# Keymap for VNC.
+#
+# The keyboard mapping (keymap) determines which keyboard layout a VNC
+# session should use by default.
+#
+# Possible values:
+#
+# * A keyboard layout which is supported by the underlying hypervisor
+# on
+# this node. This is usually an 'IETF language tag' (for example
+# 'en-us'). If you use QEMU as hypervisor, you should find the
+# list
+# of supported keyboard layouts at ``/usr/share/qemu/keymaps``.
+# (string value)
+# Deprecated group/name - [DEFAULT]/vnc_keymap
+keymap = {{ compute.get('vnc_keymap', 'en-us') }}
+
+#
+# The IP address or hostname on which an instance should listen to for
+# incoming VNC connection requests on this node.
+# (unknown value)
+# Deprecated group/name - [DEFAULT]/vncserver_listen
+# Deprecated group/name - [vnc]/vncserver_listen
+#server_listen = 127.0.0.1
+
+#
+# Private, internal IP address or hostname of VNC console proxy.
+#
+# The VNC proxy is an OpenStack component that enables compute service
+# users to access their instances through VNC clients.
+#
+# This option sets the private address that proxy clients, such as
+# ``nova-xvpvncproxy``, should connect to.
+# (unknown value)
+# Deprecated group/name - [DEFAULT]/vncserver_proxyclient_address
+# Deprecated group/name - [vnc]/vncserver_proxyclient_address
+#server_proxyclient_address = 127.0.0.1
+
+#
+# Public address of noVNC VNC console proxy.
+#
+# The VNC proxy is an OpenStack component that enables compute service
+# users to access their instances through VNC clients. noVNC provides
+# VNC support through a websocket-based client.
+#
+# This option sets the public base URL to which client systems will
+# connect. noVNC clients can use this address to connect to the noVNC
+# instance and, by extension, the VNC sessions.
+#
+# Related options:
+#
+# * novncproxy_host
+# * novncproxy_port
+# (uri value)
+#novncproxy_base_url = http://127.0.0.1:6080/vnc_auto.html
+
+#
+# IP address or hostname that the XVP VNC console proxy should bind
+# to.
+#
+# The VNC proxy is an OpenStack component that enables compute service
+# users to access their instances through VNC clients. Xen provides
+# the Xenserver VNC Proxy, or XVP, as an alternative to the
+# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
+# XVP clients are Java-based.
+#
+# This option sets the private address that the XVP VNC console
+# proxy
+# service should bind to.
+#
+# Related options:
+#
+# * xvpvncproxy_port
+# * xvpvncproxy_base_url
+# (unknown value)
+#xvpvncproxy_host = 0.0.0.0
+
+#
+# Port that the XVP VNC console proxy should bind to.
+#
+# The VNC proxy is an OpenStack component that enables compute service
+# users to access their instances through VNC clients. Xen provides
+# the Xenserver VNC Proxy, or XVP, as an alternative to the
+# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
+# XVP clients are Java-based.
+#
+# This option sets the private port that the XVP VNC console proxy
+# service should bind to.
+#
+# Related options:
+#
+# * xvpvncproxy_host
+# * xvpvncproxy_base_url
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#xvpvncproxy_port = 6081
+
+#
+# Public URL address of XVP VNC console proxy.
+#
+# The VNC proxy is an OpenStack component that enables compute service
+# users to access their instances through VNC clients. Xen provides
+# the Xenserver VNC Proxy, or XVP, as an alternative to the
+# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
+# XVP clients are Java-based.
+#
+# This option sets the public base URL to which client systems will
+# connect. XVP clients can use this address to connect to the XVP
+# instance and, by extension, the VNC sessions.
+#
+# Related options:
+#
+# * xvpvncproxy_host
+# * xvpvncproxy_port
+# (uri value)
+#xvpvncproxy_base_url = http://127.0.0.1:6081/console
+
+#
+# IP address that the noVNC console proxy should bind to.
+#
+# The VNC proxy is an OpenStack component that enables compute service
+# users to access their instances through VNC clients. noVNC provides
+# VNC support through a websocket-based client.
+#
+# This option sets the private address that the noVNC console
+# proxy
+# service should bind to.
+#
+# Related options:
+#
+# * novncproxy_port
+# * novncproxy_base_url
+# (string value)
+#novncproxy_host = 0.0.0.0
+
+#
+# Port that the noVNC console proxy should bind to.
+#
+# The VNC proxy is an OpenStack component that enables compute service
+# users to access their instances through VNC clients. noVNC provides
+# VNC support through a websocket-based client.
+#
+# This option sets the private port that the noVNC console proxy
+# service should bind to.
+#
+# Related options:
+#
+# * novncproxy_host
+# * novncproxy_base_url
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#novncproxy_port = 6080
+
+#
+# The authentication schemes to use with the compute node.
+#
+# Control what RFB authentication schemes are permitted for
+# connections between
+# the proxy and the compute host. If multiple schemes are enabled, the
+# first
+# matching scheme will be used, thus the strongest schemes should be
+# listed
+# first.
+#
+# Possible values:
+#
+# * ``none``: allow connection without authentication
+# * ``vencrypt``: use VeNCrypt authentication scheme
+#
+# Related options:
+#
+# * ``[vnc]vencrypt_client_key``, ``[vnc]vencrypt_client_cert``: must
+# also be set
+# (list value)
+#auth_schemes = none
+
+# The path to the client key PEM file (for x509)
+#
+# The fully qualified path to a PEM file containing the private key
+# which the VNC
+# proxy server presents to the compute node during VNC authentication.
+#
+# Related options:
+#
+# * ``vnc.auth_schemes``: must include ``vencrypt``
+# * ``vnc.vencrypt_client_cert``: must also be set
+# (string value)
+#vencrypt_client_key = <None>
+
+# The path to the client certificate PEM file (for x509)
+#
+# The fully qualified path to a PEM file containing the x509
+# certificate which
+# the VNC proxy server presents to the compute node during VNC
+# authentication.
+#
+# Related options:
+#
+# * ``vnc.auth_schemes``: must include ``vencrypt``
+# * ``vnc.vencrypt_client_key``: must also be set
+# (string value)
+#vencrypt_client_cert = <None>
+
+# The path to the CA certificate PEM file
+#
+# The fully qualified path to a PEM file containing one or more x509
+# certificates
+# for the certificate authorities used by the compute node VNC server.
+#
+# Related options:
+#
+# * ``vnc.auth_schemes``: must include ``vencrypt``
+# (string value)
+#vencrypt_ca_certs = <None>
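+#
+# A sketch enabling VeNCrypt with a fallback to unauthenticated
+# connections (the PEM paths are placeholders):
+#
+#   [vnc]
+#   auth_schemes = vencrypt,none
+#   vencrypt_client_key = /etc/pki/nova-novncproxy/client-key.pem
+#   vencrypt_client_cert = /etc/pki/nova-novncproxy/client-cert.pem
+#   vencrypt_ca_certs = /etc/pki/nova-novncproxy/ca.pem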
+
+
+[workarounds]
+#
+# A collection of workarounds used to mitigate bugs or issues found in
+# system
+# tools (e.g. Libvirt or QEMU) or Nova itself under certain
+# conditions. These
+# should only be enabled in exceptional circumstances. All options are
+# linked
+# against bug IDs, where more information on the issue can be found.
+
+#
+# From nova.conf
+#
+
+#
+# Use sudo instead of rootwrap.
+#
+# Allow fallback to sudo for performance reasons.
+#
+# For more information, refer to the bug report:
+#
+# https://bugs.launchpad.net/nova/+bug/1415106
+#
+# Possible values:
+#
+# * True: Use sudo instead of rootwrap
+# * False: Use rootwrap as usual
+#
+# Interdependencies to other options:
+#
+# * Any options that affect 'rootwrap' will be ignored.
+# (boolean value)
+#disable_rootwrap = false
+
+#
+# Disable live snapshots when using the libvirt driver.
+#
+# Live snapshots allow the snapshot of the disk to happen without an
+# interruption to the guest, using coordination with a guest agent to
+# quiesce the filesystem.
+#
+# When using libvirt 1.2.2 live snapshots fail intermittently under
+# load
+# (likely related to concurrent libvirt/qemu operations). This config
+# option provides a mechanism to disable live snapshot, in favor of
+# cold
+# snapshot, while this is resolved. Cold snapshot causes an instance
+# outage while the guest is going through the snapshotting process.
+#
+# For more information, refer to the bug report:
+#
+# https://bugs.launchpad.net/nova/+bug/1334398
+#
+# Possible values:
+#
+# * True: Live snapshot is disabled when using libvirt
+# * False: Live snapshots are always used when snapshotting (as long
+# as
+# there is a new enough libvirt and the backend storage supports it)
+# (boolean value)
+#disable_libvirt_livesnapshot = false
+disable_libvirt_livesnapshot = {{ compute.get('workaround', {}).get('disable_libvirt_livesnapshot', True)|lower }}
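+#
+# With no 'workaround' pillar data, the template default above renders
+# as:
+#
+#   disable_libvirt_livesnapshot = true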
+
+#
+# Enable handling of events emitted from compute drivers.
+#
+# Many compute drivers emit lifecycle events, which are events that
+# occur when,
+# for example, an instance is starting or stopping. If the instance is
+# going
+# through task state changes due to an API operation, like resize, the
+# events
+# are ignored.
+#
+# This is an advanced feature which allows the hypervisor to signal to
+# the
+# compute service that an unexpected state change has occurred in an
+# instance
+# and that the instance can be shutdown automatically. Unfortunately,
+# this can
+# race in some conditions, for example in reboot operations or when
+# the compute
+# service or when host is rebooted (planned or due to an outage). If
+# such races
+# are common, then it is advisable to disable this feature.
+#
+# Care should be taken when this feature is disabled and
+# 'sync_power_state_interval' is set to a negative value. In this
+# case, any
+# instances that get out of sync between the hypervisor and the Nova
+# database
+# will have to be synchronized manually.
+#
+# For more information, refer to the bug report:
+#
+# https://bugs.launchpad.net/bugs/1444630
+#
+# Interdependencies to other options:
+#
+# * If ``sync_power_state_interval`` is negative and this feature is
+# disabled,
+# then instances that get out of sync between the hypervisor and the
+# Nova
+# database will have to be synchronized manually.
+# (boolean value)
+#handle_virt_lifecycle_events = true
+
+#
+# Disable the server group policy check upcall in compute.
+#
+# In order to detect races with server group affinity policy, the
+# compute
+# service attempts to validate that the policy was not violated by the
+# scheduler. It does this by making an upcall to the API database to
+# list
+# the instances in the server group for one that it is booting, which
+# violates
+# our api/cell isolation goals. Eventually this will be solved by
+# proper affinity
+# guarantees in the scheduler and placement service, but until then,
+# this late
+# check is needed to ensure proper affinity policy.
+#
+# Operators that desire api/cell isolation over this check should
+# enable this flag, which will avoid making that upcall from compute.
+#
+# Related options:
+#
+# * [filter_scheduler]/track_instance_changes also relies on upcalls
+# from the
+# compute service to the scheduler service.
+# (boolean value)
+#disable_group_policy_check_upcall = false
+
+
+[wsgi]
+#
+# Options under this group are used to configure WSGI (Web Server
+# Gateway
+# Interface). WSGI is used to serve API requests.
+
+#
+# From nova.conf
+#
+
+#
+# This option represents a file name for the paste.deploy config for
+# nova-api.
+#
+# Possible values:
+#
+# * A string representing file name for the paste.deploy config.
+# (string value)
+api_paste_config = /etc/nova/api-paste.ini
+
+# DEPRECATED:
+# It represents a python format string that is used as the template to
+# generate
+# log lines. The following values can be formatted into it: client_ip,
+# date_time, request_line, status_code, body_length, wall_seconds.
+#
+# This option is used for building custom request loglines when
+# running
+# nova-api under eventlet. If used under uwsgi or apache, this option
+# has no effect.
+#
+# Possible values:
+#
+# * '%(client_ip)s "%(request_line)s" status: %(status_code)s'
+# 'len: %(body_length)s time: %(wall_seconds).7f' (default)
+# * Any formatted string formed by specific values.
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# This option only works when running nova-api under eventlet, and
+# encodes very eventlet specific pieces of information. Starting in
+# Pike
+# the preferred model for running nova-api is under uwsgi or apache
+# mod_wsgi.
+#wsgi_log_format = %(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f
+
+#
+# This option specifies the HTTP header used to determine the protocol
+# scheme
+# for the original request, even if it was removed by a SSL
+# terminating proxy.
+#
+# Possible values:
+#
+# * None (default) - the request scheme is not influenced by any HTTP
+# headers
+# * Valid HTTP header, like HTTP_X_FORWARDED_PROTO
+#
+# WARNING: Do not set this unless you know what you are doing.
+#
+# Make sure ALL of the following are true before setting this
+# (assuming the
+# values from the example above):
+# * Your API is behind a proxy.
+# * Your proxy strips the X-Forwarded-Proto header from all incoming
+# requests.
+# In other words, if end users include that header in their
+# requests, the proxy
+# will discard it.
+# * Your proxy sets the X-Forwarded-Proto header and sends it to API,
+# but only
+# for requests that originally come in via HTTPS.
+#
+# If any of those are not true, you should keep this setting set to
+# None.
+#
+# (string value)
+#secure_proxy_ssl_header = <None>
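+#
+# If, and only if, all of the conditions above hold, the usual setting
+# is:
+#
+#   [wsgi]
+#   secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO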
+
+#
+# This option allows setting path to the CA certificate file that
+# should be used
+# to verify connecting clients.
+#
+# Possible values:
+#
+# * String representing path to the CA certificate file.
+#
+# Related options:
+#
+# * enabled_ssl_apis
+# (string value)
+#ssl_ca_file = <None>
+
+#
+# This option allows setting path to the SSL certificate of API
+# server.
+#
+# Possible values:
+#
+# * String representing path to the SSL certificate.
+#
+# Related options:
+#
+# * enabled_ssl_apis
+# (string value)
+#ssl_cert_file = <None>
+
+#
+# This option specifies the path to the file where SSL private key of
+# API
+# server is stored when SSL is in effect.
+#
+# Possible values:
+#
+# * String representing path to the SSL private key.
+#
+# Related options:
+#
+# * enabled_ssl_apis
+# (string value)
+#ssl_key_file = <None>
+
+#
+# This option sets the value of TCP_KEEPIDLE in seconds for each
+# server socket.
+# It specifies the duration of time to keep connection active. TCP
+# generates a
+# KEEPALIVE transmission for an application that requests to keep
+# connection
+# active. Not supported on OS X.
+#
+# Related options:
+#
+# * keep_alive
+# (integer value)
+# Minimum value: 0
+#tcp_keepidle = 600
+
+#
+# This option specifies the size of the pool of greenthreads used by
+# wsgi.
+# It is possible to limit the number of concurrent connections using
+# this
+# option.
+# (integer value)
+# Minimum value: 0
+# Deprecated group/name - [DEFAULT]/wsgi_default_pool_size
+#default_pool_size = 1000
+
+#
+# This option specifies the maximum line size of message headers to be
+# accepted.
+# max_header_line may need to be increased when using large tokens
+# (typically
+# those generated by the Keystone v3 API with big service catalogs).
+#
+# Since TCP is a stream-based protocol, in order to reuse a
+# connection, HTTP has to have a way to indicate the end of the
+# previous response and the beginning of the next. Hence, in a
+# keep_alive case, all messages must have a
+# self-defined message length.
+# (integer value)
+# Minimum value: 0
+#max_header_line = 16384
+
+#
+# This option allows using the same TCP connection to send and receive
+# multiple
+# HTTP requests/responses, as opposed to opening a new one for every
+# single
+# request/response pair. HTTP keep-alive indicates HTTP connection
+# reuse.
+#
+# Possible values:
+#
+# * True : reuse HTTP connection.
+# * False : closes the client socket connection explicitly.
+#
+# Related options:
+#
+# * tcp_keepidle
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/wsgi_keep_alive
+#keep_alive = true
+
+#
+# This option specifies the timeout for client connections' socket
+# operations.
+# If an incoming connection is idle for this number of seconds it will
+# be
+# closed. It indicates timeout on individual read/writes on the socket
+# connection. To wait forever set to 0.
+# (integer value)
+# Minimum value: 0
+#client_socket_timeout = 900
+
+
+[xenserver]
+#
+# XenServer options are used when the compute_driver is set to use
+# XenServer (compute_driver=xenapi.XenAPIDriver).
+#
+# Must specify connection_url, connection_password and
+# ovs_integration_bridge to
+# use compute_driver=xenapi.XenAPIDriver.
+
+#
+# From nova.conf
+#
+
+#
+# Number of seconds to wait for agent's reply to a request.
+#
+# Nova configures/performs certain administrative actions on a server
+# with the
+# help of an agent that's installed on the server. The communication
+# between
+# Nova and the agent is achieved via sharing messages, called records,
+# over
+# xenstore, a shared storage across all the domains on a Xenserver
+# host.
+# Operations performed by the agent on behalf of nova are: 'version',
+# 'key_init', 'password', 'resetnetwork', 'inject_file', and
+# 'agentupdate'.
+#
+# To perform one of the above operations, the xapi 'agent' plugin
+# writes the
+# command and its associated parameters to a certain location known to
+# the domain
+# and awaits response. On being notified of the message, the agent
+# performs
+# appropriate actions on the server and writes the result back to
+# xenstore. This
+# result is then read by the xapi 'agent' plugin to determine the
+# success/failure
+# of the operation.
+#
+# This config option determines how long the xapi 'agent' plugin shall
+# wait to
+# read the response off of xenstore for a given request/command. If
+# the agent on
+# the instance fails to write the result in this time period, the
+# operation is
+# considered to have timed out.
+#
+# Related options:
+#
+# * ``agent_version_timeout``
+# * ``agent_resetnetwork_timeout``
+#
+# (integer value)
+# Minimum value: 0
+#agent_timeout = 30
+
+#
+# Number of seconds to wait for agent's reply to version request.
+#
+# This indicates the amount of time xapi 'agent' plugin waits for the
+# agent to
+# respond to the 'version' request specifically. The generic timeout
+# for agent
+# communication ``agent_timeout`` is ignored in this case.
+#
+# During the build process the 'version' request is used to determine
+# if the
+# agent is available/operational to perform other requests such as
+# 'resetnetwork', 'password', 'key_init' and 'inject_file'. If the
+# 'version' call
+# fails, the other configuration is skipped. So, this configuration
+# option can
+# also be interpreted as the time in which the agent is expected to be
+# fully operational.
+# (integer value)
+# Minimum value: 0
+#agent_version_timeout = 300
+
+#
+# Number of seconds to wait for agent's reply to resetnetwork
+# request.
+#
+# This indicates the amount of time xapi 'agent' plugin waits for the
+# agent to
+# respond to the 'resetnetwork' request specifically. The generic
+# timeout for
+# agent communication ``agent_timeout`` is ignored in this case.
+# (integer value)
+# Minimum value: 0
+#agent_resetnetwork_timeout = 60
+
+#
+# Path to locate guest agent on the server.
+#
+# Specifies the path in which the XenAPI guest agent should be
+# located. If the
+# agent is present, network configuration is not injected into the
+# image.
+#
+# Related options:
+#
+# For this option to have an effect:
+# * ``flat_injected`` should be set to ``True``
+# * ``compute_driver`` should be set to ``xenapi.XenAPIDriver``
+#
+# (string value)
+#agent_path = usr/sbin/xe-update-networking
+
+#
+# Disables the use of XenAPI agent.
+#
+# This configuration option suggests whether the use of agent should
+# be enabled
+# or not regardless of what image properties are present. Image
+# properties have
+# an effect only when this is set to ``True``. Read description of
+# config option
+# ``use_agent_default`` for more information.
+#
+# Related options:
+#
+# * ``use_agent_default``
+#
+# (boolean value)
+#disable_agent = false
+
+#
+# Whether or not to use the agent by default when its usage is enabled
+# but not
+# indicated by the image.
+#
+# The use of XenAPI agent can be disabled altogether using the
+# configuration
+# option ``disable_agent``. However, if it is not disabled, the use of
+# an agent
+# can still be controlled by the image in use through one of its
+# properties,
+# ``xenapi_use_agent``. If this property is either not present or
+# specified
+# incorrectly on the image, the use of agent is determined by this
+# configuration
+# option.
+#
+# Note that if this configuration is set to ``True`` when the agent is
+# not
+# present, the boot times will increase significantly.
+#
+# Related options:
+#
+# * ``disable_agent``
+#
+# (boolean value)
+#use_agent_default = false
+
+# Timeout in seconds for XenAPI login. (integer value)
+# Minimum value: 0
+#login_timeout = 10
+
+#
+# Maximum number of concurrent XenAPI connections.
+#
+# In nova, multiple XenAPI requests can happen at a time.
+# Configuring this option will parallelize access to the XenAPI
+# session, which allows you to make concurrent XenAPI connections.
+# (integer value)
+# Minimum value: 1
+#connection_concurrent = 5
+
+#
+# Cache glance images locally.
+#
+# The value for this option must be chosen from the choices listed
+# here. Configuring a value other than these will default to 'all'.
+#
+# Note: There is nothing that deletes these images.
+#
+# Possible values:
+#
+# * `all`: will cache all images.
+# * `some`: will only cache images that have the
+# image_property `cache_in_nova=True`.
+# * `none`: turns off caching entirely.
+# (string value)
+# Possible values:
+# all - <No description provided>
+# some - <No description provided>
+# none - <No description provided>
+#cache_images = all
+
+#
+# Compression level for images.
+#
+# By setting this option we can configure the gzip compression level.
+# This option sets the GZIP environment variable before spawning tar -cz
+# to force the compression level. It defaults to none, which means the
+# GZIP environment variable is not set and the default (usually -6)
+# is used.
+#
+# Possible values:
+#
+# * Range is 1-9, e.g., 9 for gzip -9, 9 being most
+# compressed but most CPU intensive on dom0.
+# * Any values out of this range will default to None.
+# (integer value)
+# Minimum value: 1
+# Maximum value: 9
+#image_compression_level = <None>
+
+# Default OS type used when uploading an image to glance (string
+# value)
+#default_os_type = linux
+
+# Time in secs to wait for a block device to be created (integer
+# value)
+# Minimum value: 1
+#block_device_creation_timeout = 10
+{%- if compute.block_device_creation_timeout is defined %}
+block_device_creation_timeout = {{ compute.block_device_creation_timeout }}
+{%- endif %}
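+{#-
+A hypothetical pillar override (assuming this formula's usual
+nova:compute pillar layout):
+  nova:
+    compute:
+      block_device_creation_timeout: 60
+renders the block above as "block_device_creation_timeout = 60".
+#}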
+
+#
+# Maximum size in bytes of kernel or ramdisk images.
+#
+# Specifying the maximum size of kernel or ramdisk will avoid copying
+# large files to dom0 and filling up /boot/guest.
+# (integer value)
+#max_kernel_ramdisk_size = 16777216
+
+#
+# Filter for finding the SR to be used to install guest instances on.
+#
+# Possible values:
+#
+# * To use the Local Storage in default XenServer/XCP installations
+# set this flag to other-config:i18n-key=local-storage.
+# * To select an SR with a different matching criteria, you could
+# set it to other-config:my_favorite_sr=true.
+# * To fall back on the Default SR, as displayed by XenCenter,
+# set this flag to: default-sr:true.
+# (string value)
+#sr_matching_filter = default-sr:true
+
+#
+# Whether to use sparse_copy for copying data on a resize down.
+# (False will use standard dd). This speeds up resizes down
+# considerably since large runs of zeros won't have to be rsynced.
+# (boolean value)
+#sparse_copy = true
+
+#
+# Maximum number of retries to unplug VBD.
+# If set to 0, should try once, no retries.
+# (integer value)
+# Minimum value: 0
+#num_vbd_unplug_retries = 10
+
+#
+# Name of network to use for booting iPXE ISOs.
+#
+# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
+# This feature gives a means to roll your own image.
+#
+# By default this option is not set. Enable this option to
+# boot an iPXE ISO.
+#
+# Related Options:
+#
+# * `ipxe_boot_menu_url`
+# * `ipxe_mkisofs_cmd`
+# (string value)
+#ipxe_network_name = <None>
+
+#
+# URL to the iPXE boot menu.
+#
+# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
+# This feature gives a means to roll your own image.
+#
+# By default this option is not set. Enable this option to
+# boot an iPXE ISO.
+#
+# Related Options:
+#
+# * `ipxe_network_name`
+# * `ipxe_mkisofs_cmd`
+# (string value)
+#ipxe_boot_menu_url = <None>
+
+#
+# Name and optionally path of the tool used for ISO image creation.
+#
+# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
+# This feature gives a means to roll your own image.
+#
+# Note: By default `mkisofs` is not present in the Dom0, so the
+# package can either be manually added to Dom0, or the `mkisofs`
+# binary can be included in the image itself.
+#
+# Related Options:
+#
+# * `ipxe_network_name`
+# * `ipxe_boot_menu_url`
+# (string value)
+#ipxe_mkisofs_cmd = mkisofs
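+#
+# A minimal iPXE boot sketch (the network name and URL are
+# illustrative values, not defaults):
+#   ipxe_network_name = ipxe-net
+#   ipxe_boot_menu_url = http://boot.example.com/menu.ipxe
+#   ipxe_mkisofs_cmd = mkisofs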
+
+#
+# URL for connection to XenServer/Xen Cloud Platform. A special value
+# of unix://local can be used to connect to the local unix socket.
+#
+# Possible values:
+#
+# * Any string that represents a URL. The connection_url is
+# generally the management network IP address of the XenServer.
+# * This option must be set if you chose the XenServer driver.
+# (string value)
+#connection_url = <None>
+
+# Username for connection to XenServer/Xen Cloud Platform (string
+# value)
+#connection_username = root
+
+# Password for connection to XenServer/Xen Cloud Platform (string
+# value)
+#connection_password = <None>
+
+#
+# The interval used for polling of coalescing vhds.
+#
+# This is the interval after which the task of coalescing the VHD is
+# performed, until it reaches the max attempts that is set by
+# vhd_coalesce_max_attempts.
+#
+# Related options:
+#
+# * `vhd_coalesce_max_attempts`
+# (floating point value)
+# Minimum value: 0
+#vhd_coalesce_poll_interval = 5.0
+
+#
+# Ensure compute service is running on host XenAPI connects to.
+# This option must be set to false if the 'independent_compute'
+# option is set to true.
+#
+# Possible values:
+#
+# * Setting this option to true will make sure that compute service
+# is running on the same host that is specified by connection_url.
+# * Setting this option to false skips the check.
+#
+# Related options:
+#
+# * `independent_compute`
+# (boolean value)
+#check_host = true
+
+#
+# Max number of times to poll for VHD to coalesce.
+#
+# This option determines the maximum number of attempts that can be
+# made for coalescing the VHD before giving up.
+#
+# Related options:
+#
+# * `vhd_coalesce_poll_interval`
+# (integer value)
+# Minimum value: 0
+#vhd_coalesce_max_attempts = 20
+
+# Base path to the storage repository on the XenServer host. (string
+# value)
+#sr_base_path = /var/run/sr-mount
+
+#
+# The iSCSI Target Host.
+#
+# This option represents the hostname or ip of the iSCSI Target.
+# If the target host is not present in the connection information from
+# the volume provider then the value from this option is taken.
+#
+# Possible values:
+#
+# * Any string that represents hostname/ip of Target.
+# (unknown value)
+#target_host = <None>
+
+#
+# The iSCSI Target Port.
+#
+# This option represents the port of the iSCSI Target. If the
+# target port is not present in the connection information from the
+# volume provider then the value from this option is taken.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#target_port = 3260
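+#
+# Illustrative example (the address is hypothetical): fall back to a
+# fixed iSCSI portal whenever the volume provider omits the target:
+#   target_host = 192.0.2.10
+#   target_port = 3260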
+
+#
+# Used to prevent attempts to attach VBDs locally, so Nova can
+# be run in a VM on a different host.
+#
+# Related options:
+#
+# * ``CONF.flat_injected`` (Must be False)
+# * ``CONF.xenserver.check_host`` (Must be False)
+# * ``CONF.default_ephemeral_format`` (Must be unset or 'ext3')
+# * Joining host aggregates (will error if attempted)
+# * Swap disks for Windows VMs (will error if attempted)
+# * Nova-based auto_configure_disk (will error if attempted)
+# (boolean value)
+#independent_compute = false
+
+#
+# Wait time for instances to go to running state.
+#
+# Provide an integer value representing time in seconds to set the
+# wait time for an instance to go to running state.
+#
+# When a request to create an instance is received by nova-api and
+# communicated to nova-compute, the creation of the instance occurs
+# through interaction with Xen via XenAPI in the compute node. Once
+# the node on which the instance(s) are to be launched is decided by
+# nova-scheduler and the launch is triggered, a certain amount of wait
+# time is involved until the instance(s) can become available and
+# 'running'. This wait time is defined by running_timeout. If the
+# instances do not go to running state within this specified wait
+# time, the launch expires and the instance(s) are set to 'error'
+# state.
+# (integer value)
+# Minimum value: 0
+#running_timeout = 60
+
+# DEPRECATED:
+# The XenAPI VIF driver using XenServer Network APIs.
+#
+# Provide a string value representing the XenAPI VIF driver to use
+# for plugging virtual network interfaces.
+#
+# Xen configuration uses bridging within the backend domain to allow
+# all VMs to appear on the network as individual hosts. Bridge
+# interfaces are used to create a XenServer VLAN network in which
+# the VIFs for the VM instances are plugged. If no VIF bridge driver
+# is plugged, the bridge is not made available. This configuration
+# option takes in a value for the VIF driver.
+#
+# Possible values:
+#
+# * nova.virt.xenapi.vif.XenAPIOpenVswitchDriver (default)
+# * nova.virt.xenapi.vif.XenAPIBridgeDriver (deprecated)
+#
+# Related options:
+#
+# * ``vlan_interface``
+# * ``ovs_integration_bridge``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# There are only two in-tree vif drivers for XenServer.
+# XenAPIBridgeDriver is for
+# nova-network which is deprecated and XenAPIOpenVswitchDriver is for
+# Neutron
+# which is the default configuration for Nova since the 15.0.0 Ocata
+# release. In
+# the future the "use_neutron" configuration option will be used to
+# determine
+# which vif driver to use.
+#vif_driver = nova.virt.xenapi.vif.XenAPIOpenVswitchDriver
+
+#
+# Dom0 plugin driver used to handle image uploads.
+#
+# Provide a string value representing a plugin driver required to
+# handle the image uploading to GlanceStore.
+#
+# Images and snapshots from XenServer need to be uploaded to the data
+# store for use. image_upload_handler takes in a value for the Dom0
+# plugin driver. This driver is then called to upload images to the
+# GlanceStore.
+# (string value)
+#image_upload_handler = nova.virt.xenapi.image.glance.GlanceStore
+
+#
+# Number of seconds to wait for SR to settle if the VDI
+# does not exist when first introduced.
+#
+# Some SRs, particularly iSCSI connections, are slow to see the VDIs
+# right after they are introduced. Setting this option to a
+# time interval makes the SR wait for that time period
+# before raising a VDI-not-found exception.
+# (integer value)
+# Minimum value: 0
+#introduce_vdi_retry_wait = 20
+
+#
+# The name of the integration Bridge that is used with xenapi
+# when connecting with Open vSwitch.
+#
+# Note: The value of this config option is dependent on the
+# environment, therefore this configuration value must be set
+# accordingly if you are using XenAPI.
+#
+# Possible values:
+#
+# * Any string that represents a bridge name.
+# (string value)
+#ovs_integration_bridge = <None>
+
+#
+# When adding new host to a pool, this will append a --force flag to
+# the
+# command, forcing hosts to join a pool, even if they have different
+# CPUs.
+#
+# Since XenServer version 5.6 it is possible to create a pool of hosts
+# that have different CPU capabilities. To accommodate CPU
+# differences, XenServer limited the features it uses to determine CPU
+# compatibility to only the ones exposed by the CPU, and added support
+# for CPU masking. Despite this effort to level differences between
+# CPUs, it is still possible that adding a new host will fail, so an
+# option to force the join was introduced.
+# (boolean value)
+#use_join_force = true
+
+#
+# Publicly visible name for this console host.
+#
+# Possible values:
+#
+# * Current hostname (default) or any string representing hostname.
+# (string value)
+#console_public_hostname = <current_hostname>
+
+
+[xvp]
+#
+# Configuration options for XVP.
+#
+# xvp (Xen VNC Proxy) is a proxy server providing password-protected
+# VNC-based
+# access to the consoles of virtual machines hosted on Citrix
+# XenServer.
+
+#
+# From nova.conf
+#
+
+# XVP conf template (string value)
+#console_xvp_conf_template = $pybasedir/nova/console/xvp.conf.template
+
+# Generated XVP conf file (string value)
+#console_xvp_conf = /etc/xvp.conf
+
+# XVP master process pid file (string value)
+#console_xvp_pid = /var/run/xvp.pid
+
+# XVP log file (string value)
+#console_xvp_log = /var/log/xvp.log
+
+# Port for XVP to multiplex VNC connections on (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#console_xvp_multiplex_port = 5900
+
+[matchmaker_redis]
+{#- include "oslo_templates/oslo/_matchmaker_redis.conf" #}
+
+[oslo_messaging_notifications]
+{%- set _data = compute.notification %}
+{%- include "oslo_templates/files/queens/oslo/messaging/_notifications.conf" %}
+
+{%- if compute.message_queue is defined %}
+{%- set _data = compute.message_queue %}
+{%- if _data.engine == 'rabbitmq' %}
+ {%- set messaging_engine = 'rabbit' %}
+{%- else %}
+ {%- set messaging_engine = _data.engine %}
+{%- endif %}
+[oslo_messaging_{{ messaging_engine }}]
+{%- include "oslo_templates/files/queens/oslo/messaging/_" + messaging_engine + ".conf" %}
+{%- endif %}
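+{#-
+Illustrative pillar data for the block above (hypothetical values):
+  nova:
+    compute:
+      message_queue:
+        engine: rabbitmq
+renders an [oslo_messaging_rabbit] section from the Queens
+oslo_templates include.
+#}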
+
+[oslo_policy]
+{%- if compute.policy is defined %}
+{%- set _data = compute.policy %}
+{%- include "oslo_templates/files/queens/oslo/_policy.conf" %}
+{%- endif %}
+
+[database]
+{%- set _data = compute.database %}
+{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': compute.cacert_file}) %}{% endif %}
+{%- include "oslo_templates/files/queens/oslo/_database.conf" %}
+
+[oslo_middleware]
+{%- set _data = compute %}
+{%- include "oslo_templates/files/queens/oslo/_middleware.conf" %}
+
+[keystone_authtoken]
+{%- set _data = compute.identity %}
+{%- set auth_type = _data.get('auth_type', 'password') %}
+{%- include "oslo_templates/files/queens/keystonemiddleware/_auth_token.conf" %}
+{%- include "oslo_templates/files/queens/keystoneauth/_type_" + auth_type + ".conf" %}
+
diff --git a/nova/files/queens/nova-compute.conf.RedHat b/nova/files/queens/nova-compute.conf.RedHat
new file mode 120000
index 0000000..ad4c8f6
--- /dev/null
+++ b/nova/files/queens/nova-compute.conf.RedHat
@@ -0,0 +1 @@
+nova-compute.conf.Debian
\ No newline at end of file
diff --git a/nova/files/queens/nova-controller.conf.Debian b/nova/files/queens/nova-controller.conf.Debian
new file mode 100644
index 0000000..480fec8
--- /dev/null
+++ b/nova/files/queens/nova-controller.conf.Debian
@@ -0,0 +1,11146 @@
+{%- from "nova/map.jinja" import controller with context %}
+[DEFAULT]
+
+#
+# From nova.conf
+#
+compute_manager=nova.controller.manager.ComputeManager
+network_device_mtu=65000
+use_neutron = True
+security_group_api=neutron
+image_service=nova.image.glance.GlanceImageService
+
+#
+# Availability zone for internal services.
+#
+# This option determines the availability zone for the various
+# internal nova
+# services, such as 'nova-scheduler', 'nova-conductor', etc.
+#
+# Possible values:
+#
+# * Any string representing an existing availability zone name.
+# (string value)
+#internal_service_availability_zone = internal
+
+#
+# Default availability zone for compute services.
+#
+# This option determines the default availability zone for 'nova-
+# compute'
+# services, which will be used if the service(s) do not belong to
+# aggregates with
+# availability zone metadata.
+#
+# Possible values:
+#
+# * Any string representing an existing availability zone name.
+# (string value)
+#default_availability_zone = nova
+
+#
+# Default availability zone for instances.
+#
+# This option determines the default availability zone for instances,
+# which will
+# be used when a user does not specify one when creating an instance.
+# The
+# instance(s) will be bound to this availability zone for their
+# lifetime.
+#
+# Possible values:
+#
+# * Any string representing an existing availability zone name.
+# * None, which means that the instance can move from one availability
+# zone to
+# another during its lifetime if it is moved from one compute node
+# to another.
+# (string value)
+#default_schedule_zone = <None>
+
+# Length of generated instance admin passwords. (integer value)
+# Minimum value: 0
+#password_length = 12
+
+#
+# Time period to generate instance usages for. It is possible to
+# define an optional offset to a given period by appending the @
+# character followed by a number defining the offset.
+#
+# Possible values:
+#
+# * period, example: ``hour``, ``day``, ``month`` or ``year``
+# * period with offset, example: ``month@15`` will result in monthly
+# audits
+# starting on 15th day of month.
+# (string value)
+#instance_usage_audit_period = month
+{% if pillar.ceilometer is defined %}
+instance_usage_audit = True
+instance_usage_audit_period = hour
+{%- endif %}
+
+#
+# Start and use a daemon that can run the commands that need to be run
+# with
+# root privileges. This option is usually enabled on nodes that run
+# nova compute
+# processes.
+# (boolean value)
+#use_rootwrap_daemon = false
+
+#
+# Path to the rootwrap configuration file.
+#
+# The goal of the root wrapper is to allow a service-specific
+# unprivileged user to run a number of actions as the root user in the
+# safest manner possible.
+# The configuration file used here must match the one defined in the
+# sudoers
+# entry.
+# (string value)
+rootwrap_config = /etc/nova/rootwrap.conf
+
+# Explicitly specify the temporary working directory. (string value)
+#tempdir = <None>
+
+# DEPRECATED:
+# Determine if monkey patching should be applied.
+#
+# Related options:
+#
+# * ``monkey_patch_modules``: This must have values set for this
+# option to
+# have any effect
+# (boolean value)
+# This option is deprecated for removal since 17.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# Monkey patching nova is not tested, not supported, and is a barrier
+# for interoperability.
+#monkey_patch = false
+
+# DEPRECATED:
+# List of modules/decorators to monkey patch.
+#
+# This option allows you to patch a decorator for all functions in
+# specified
+# modules.
+#
+# Possible values:
+#
+# * nova.compute.api:nova.notifications.notify_decorator
+# * [...]
+#
+# Related options:
+#
+# * ``monkey_patch``: This must be set to ``True`` for this option to
+# have any effect
+# (list value)
+# This option is deprecated for removal since 17.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# Monkey patching nova is not tested, not supported, and is a barrier
+# for interoperability.
+#monkey_patch_modules = nova.compute.api:nova.notifications.notify_decorator
+
+#
+# Defines which driver to use for controlling virtualization.
+#
+# Possible values:
+#
+# * ``libvirt.LibvirtDriver``
+# * ``xenapi.XenAPIDriver``
+# * ``fake.FakeDriver``
+# * ``ironic.IronicDriver``
+# * ``vmwareapi.VMwareVCDriver``
+# * ``hyperv.HyperVDriver``
+# * ``powervm.PowerVMDriver``
+# (string value)
+#compute_driver = <None>
+compute_driver = {{ controller.get('compute_driver', 'libvirt.LibvirtDriver') }}
+
+#
+# Allow destination machine to match source for resize. Useful when
+# testing in single-host environments. By default it is not allowed
+# to resize to the same host. Setting this option to true will add
+# the same host to the destination options. Also set to true
+# if you allow the ServerGroupAffinityFilter and need to resize.
+# (boolean value)
+#allow_resize_to_same_host = false
+allow_resize_to_same_host = true
+
+#
+# Image properties that should not be inherited from the instance
+# when taking a snapshot.
+#
+# This option gives an opportunity to select which image-properties
+# should not be inherited by newly created snapshots.
+#
+# Possible values:
+#
+# * A comma-separated list whose item is an image property. Usually
+# only
+# the image properties that are only needed by base images can be
+# included
+# here, since the snapshots that are created from the base images
+# don't
+# need them.
+# * Default list: cache_in_nova, bittorrent,
+# img_signature_hash_method,
+# img_signature, img_signature_key_type,
+# img_signature_certificate_uuid
+#
+# (list value)
+#non_inheritable_image_properties = cache_in_nova,bittorrent,img_signature_hash_method,img_signature,img_signature_key_type,img_signature_certificate_uuid
+
+# DEPRECATED:
+# When creating multiple instances with a single request using the
+# os-multiple-create API extension, this template will be used to
+# build
+# the display name for each instance. The benefit is that the
+# instances
+# end up with different hostnames. Example display names when creating
+# two VMs: name-1, name-2.
+#
+# Possible values:
+#
+# * Valid keys for the template are: name, uuid, count.
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# This config changes API behaviour. All changes in API behaviour
+# should be
+# discoverable.
+#multi_instance_display_name_template = %(name)s-%(count)d
+
+#
+# Maximum number of devices that will result in a local image being
+# created on the hypervisor node.
+#
+# A negative number means unlimited. Setting max_local_block_devices
+# to 0 means that any request that attempts to create a local disk
+# will fail. This option is meant to limit the number of local disks
+# (i.e. the root local disk that results from --image being used, and
+# any other ephemeral and swap disks). 0 does not mean that images
+# will be automatically converted to volumes and boot instances from
+# volumes - it just means that all requests that attempt to create a
+# local disk will fail.
+#
+# Possible values:
+#
+# * 0: Creating a local disk is not allowed.
+# * Negative number: Allows an unlimited number of local disks.
+# * Positive number: Allows only that many local disks.
+# (Default value is 3).
+# (integer value)
+#max_local_block_devices = 3
+
+#
+# A comma-separated list of monitors that can be used for getting
+# compute metrics. You can use the alias/name from the setuptools
+# entry points for nova.compute.monitors.* namespaces. If no
+# namespace is supplied, the "cpu." namespace is assumed for
+# backwards-compatibility.
+#
+# NOTE: Only one monitor per namespace (For example: cpu) can be
+# loaded at
+# a time.
+#
+# Possible values:
+#
+# * An empty list will disable the feature (Default).
+# * An example value that would enable both the CPU and NUMA memory
+# bandwidth monitors that use the virt driver variant:
+#
+# compute_monitors = cpu.virt_driver, numa_mem_bw.virt_driver
+# (list value)
+#compute_monitors =
+
+#
+# The default format an ephemeral_volume will be formatted with on
+# creation.
+#
+# Possible values:
+#
+# * ``ext2``
+# * ``ext3``
+# * ``ext4``
+# * ``xfs``
+# * ``ntfs`` (only for Windows guests)
+# (string value)
+#default_ephemeral_format = <None>
+
+#
+# Determine if instance should boot or fail on VIF plugging timeout.
+#
+# Nova sends a port update to Neutron after an instance has been
+# scheduled,
+# providing Neutron with the necessary information to finish setup of
+# the port.
+# Once completed, Neutron notifies Nova that it has finished setting
+# up the
+# port, at which point Nova resumes the boot of the instance since
+# network
+# connectivity is now supposed to be present. A timeout will occur if
+# the reply
+# is not received after a given interval.
+#
+# This option determines what Nova does when the VIF plugging timeout
+# event
+# happens. When enabled, the instance will error out. When disabled,
+# the
+# instance will continue to boot on the assumption that the port is
+# ready.
+#
+# Possible values:
+#
+# * True: Instances should fail after VIF plugging timeout
+# * False: Instances should continue booting after VIF plugging
+# timeout
+# (boolean value)
+vif_plugging_is_fatal = {{ controller.get('vif_plugging_is_fatal', 'true') }}
+
+#
+# Timeout for Neutron VIF plugging event message arrival.
+#
+# Number of seconds to wait for Neutron vif plugging events to
+# arrive before continuing or failing (see 'vif_plugging_is_fatal').
+#
+# Related options:
+#
+# * vif_plugging_is_fatal - If ``vif_plugging_timeout`` is set to zero
+# and
+# ``vif_plugging_is_fatal`` is False, events should not be expected
+# to
+# arrive at all.
+# (integer value)
+# Minimum value: 0
+vif_plugging_timeout = {{ controller.get('vif_plugging_timeout', '300') }}
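+#
+# For example (a debugging-only sketch), to let instances continue
+# booting even if the Neutron vif-plugged event never arrives:
+#   vif_plugging_is_fatal = false
+#   vif_plugging_timeout = 0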
+
+# Path to '/etc/network/interfaces' template.
+#
+# The path to a template file for the '/etc/network/interfaces'-style
+# file, which
+# will be populated by nova and subsequently used by cloudinit. This
+# provides a
+# method to configure network connectivity in environments without a
+# DHCP server.
+#
+# The template will be rendered using Jinja2 template engine, and
+# receive a
+# top-level key called ``interfaces``. This key will contain a list of
+# dictionaries, one for each interface.
+#
+# Refer to the cloudinit documentation for more information:
+#
+# https://cloudinit.readthedocs.io/en/latest/topics/datasources.html
+#
+# Possible values:
+#
+# * A path to a Jinja2-formatted template for a Debian
+# '/etc/network/interfaces'
+# file. This applies even if using a non Debian-derived guest.
+#
+# Related options:
+#
+# * ``flat_injected``: This must be set to ``True`` to ensure nova
+# embeds network
+# configuration information in the metadata provided through the
+# config drive.
+# (string value)
+#injected_network_template = $pybasedir/nova/virt/interfaces.template
+
+#
+# The image preallocation mode to use.
+#
+# Image preallocation allows storage for instance images to be
+# allocated up front
+# when the instance is initially provisioned. This ensures immediate
+# feedback is
+# given if enough space isn't available. In addition, it should
+# significantly
+# improve performance on writes to new blocks and may even improve I/O
+# performance to prewritten blocks due to reduced fragmentation.
+#
+# Possible values:
+#
+# * "none" => no storage provisioning is done up front
+# * "space" => storage is fully allocated at instance start
+# (string value)
+# Possible values:
+# none - <No description provided>
+# space - <No description provided>
+#preallocate_images = none
+
+#
+# Enable use of copy-on-write (cow) images.
+#
+# QEMU/KVM allow the use of qcow2 as backing files. By disabling this,
+# backing files will not be used.
+# (boolean value)
+#use_cow_images = true
+
+#
+# Force conversion of backing images to raw format.
+#
+# Possible values:
+#
+# * True: Backing image files will be converted to raw image format
+# * False: Backing image files will not be converted
+#
+# Related options:
+#
+# * ``compute_driver``: Only the libvirt driver uses this option.
+# (boolean value)
+#force_raw_images = true
+
+#
+# Name of the mkfs commands for ephemeral device.
+#
+# The format is <os_type>=<mkfs command>
+# (multi valued)
+#virt_mkfs =
+
+#
+# Enable resizing of filesystems via a block device.
+#
+# If enabled, attempt to resize the filesystem by accessing the image
+# over a
+# block device. This is done by the host and may not be necessary if
+# the image
+# contains a recent version of cloud-init. Possible mechanisms require
+# the nbd
+# driver (for qcow and raw), or loop (for raw).
+# (boolean value)
+#resize_fs_using_block_device = false
+
+# Amount of time, in seconds, to wait for NBD device start up.
+# (integer value)
+# Minimum value: 0
+#timeout_nbd = 10
+
+#
+# Location of cached images.
+#
+# This is NOT the full path - just a folder name relative to
+# '$instances_path'.
+# For per-compute-host cached images, set to '_base_$my_ip'
+# (string value)
+#image_cache_subdirectory_name = _base
+
+# Should unused base images be removed? (boolean value)
+#remove_unused_base_images = true
+
+#
+# Unused unresized base images younger than this will not be removed.
+# (integer value)
+remove_unused_original_minimum_age_seconds = 86400
+
+#
+# Generic property to specify the pointer type.
+#
+# Input devices allow interaction with a graphical framebuffer. For
+# example to provide a graphic tablet for absolute cursor movement.
+#
+# If set, the 'hw_pointer_model' image property takes precedence over
+# this configuration option.
+#
+# Possible values:
+#
+# * None: Uses default behavior provided by drivers (mouse on PS2 for
+# libvirt x86)
+# * ps2mouse: Uses relative movement. Mouse connected by PS2
+# * usbtablet: Uses absolute movement. Tablet connected by USB
+#
+# Related options:
+#
+# * usbtablet must be configured with VNC enabled or SPICE enabled and
+# SPICE
+# agent disabled. When used with libvirt the instance mode should be
+# configured as HVM.
+# (string value)
+# Possible values:
+# <None> - <No description provided>
+# ps2mouse - <No description provided>
+# usbtablet - <No description provided>
+#pointer_model = usbtablet
+
+#
+# Defines which physical CPUs (pCPUs) can be used by instance
+# virtual CPUs (vCPUs).
+#
+# Possible values:
+#
+# * A comma-separated list of physical CPU numbers that virtual CPUs
+# can be
+# allocated to by default. Each element should be either a single
+# CPU number,
+# a range of CPU numbers, or a caret followed by a CPU number to be
+# excluded from a previous range. For example:
+#
+# vcpu_pin_set = "4-12,^8,15"
+# (string value)
+#vcpu_pin_set = <None>
+
+#
+# Number of huge/large memory pages to reserve per NUMA host cell.
+#
+# Possible values:
+#
+# * A list of valid key=value which reflect NUMA node ID, page size
+# (Default unit is KiB) and number of pages to be reserved.
+#
+# reserved_huge_pages = node:0,size:2048,count:64
+# reserved_huge_pages = node:1,size:1GB,count:1
+#
+# In this example we reserve 64 pages of 2MiB on NUMA node 0
+# and 1 page of 1GiB on NUMA node 1.
+# (dict value)
+#reserved_huge_pages = <None>
+
+#
+# Amount of disk resources in MB to make them always available to
+# host. The
+# disk usage gets reported back to the scheduler from nova-compute
+# running
+# on the compute nodes. To prevent the disk resources from being
+# considered
+# as available, this option can be used to reserve disk space for that
+# host.
+#
+# Possible values:
+#
+# * Any positive integer representing amount of disk in MB to reserve
+# for the host.
+# (integer value)
+# Minimum value: 0
+#reserved_host_disk_mb = 0
+
+#
+# Amount of memory in MB to reserve for the host so that it is always
+# available
+# to host processes. The host resources usage is reported back to the
+# scheduler
+# continuously from nova-compute running on the compute node. To
+# prevent the host
+# memory from being considered as available, this option is used to
+# reserve
+# memory for the host.
+#
+# Possible values:
+#
+# * Any positive integer representing amount of memory in MB to
+# reserve
+# for the host.
+# (integer value)
+# Minimum value: 0
+#reserved_host_memory_mb = 512
+
+#
+# Number of physical CPUs to reserve for the host. The host resources
+# usage is
+# reported back to the scheduler continuously from nova-compute
+# running on the
+# compute node. To prevent the host CPU from being considered as
+# available,
+# this option is used to reserve random pCPU(s) for the host.
+#
+# Possible values:
+#
+# * Any positive integer representing number of physical CPUs to
+# reserve
+# for the host.
+# (integer value)
+# Minimum value: 0
+#reserved_host_cpus = 0
+
+#
+# This option helps you specify virtual CPU to physical CPU allocation
+# ratio.
+#
+# From Ocata (15.0.0) this is used to influence the hosts selected by
+# the Placement API. Note that when Placement is used, the CoreFilter
+# is redundant, because the Placement API will have already filtered
+# out hosts that would have failed the CoreFilter.
+#
+# This configuration specifies ratio for CoreFilter which can be set
+# per compute node. For AggregateCoreFilter, it will fall back to this
+# configuration value if no per-aggregate setting is found.
+#
+# NOTE: This can be set per-compute, or if set to 0.0, the value
+# set on the scheduler node(s) or compute node(s) will be used
+# and defaulted to 16.0.
+#
+# NOTE: As of the 16.0.0 Pike release, this configuration option is
+# ignored
+# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
+#
+# Possible values:
+#
+# * Any valid positive integer or float value
+# (floating point value)
+# Minimum value: 0
+#cpu_allocation_ratio = 0.0
+{%- if controller.cpu_allocation_ratio is defined %}
+cpu_allocation_ratio = {{ controller.cpu_allocation_ratio }}
+{%- else %}
+#cpu_allocation_ratio=0.0
+{%- endif %}
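+{#-
+A hypothetical pillar override (assuming this formula's usual
+nova:controller pillar layout):
+  nova:
+    controller:
+      cpu_allocation_ratio: 8.0
+renders "cpu_allocation_ratio = 8.0" above.
+#}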
+
+#
+# This option helps you specify virtual RAM to physical RAM
+# allocation ratio.
+#
+# From Ocata (15.0.0) this is used to influence the hosts selected by
+# the Placement API. Note that when Placement is used, the RamFilter
+# is redundant, because the Placement API will have already filtered
+# out hosts that would have failed the RamFilter.
+#
+# This configuration specifies ratio for RamFilter which can be set
+# per compute node. For AggregateRamFilter, it will fall back to this
+# configuration value if no per-aggregate setting is found.
+#
+# NOTE: This can be set per-compute, or if set to 0.0, the value
+# set on the scheduler node(s) or compute node(s) will be used and
+# defaulted to 1.5.
+#
+# NOTE: As of the 16.0.0 Pike release, this configuration option is
+# ignored
+# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
+#
+# Possible values:
+#
+# * Any valid positive integer or float value
+# (floating point value)
+# Minimum value: 0
+#ram_allocation_ratio = 0.0
+{%- if controller.ram_allocation_ratio is defined %}
+ram_allocation_ratio = {{ controller.ram_allocation_ratio }}
+{%- else %}
+#ram_allocation_ratio=0.0
+{%- endif %}
+
+#
+# This option helps you specify virtual disk to physical disk
+# allocation ratio.
+#
+# From Ocata (15.0.0) this is used to influence the hosts selected by
+# the Placement API. Note that when Placement is used, the DiskFilter
+# is redundant, because the Placement API will have already filtered
+# out hosts that would have failed the DiskFilter.
+#
+# A ratio greater than 1.0 will result in over-subscription of the
+# available physical disk, which can be useful for more
+# efficiently packing instances created with images that do not
+# use the entire virtual disk, such as sparse or compressed
+# images. It can be set to a value between 0.0 and 1.0 in order
+# to preserve a percentage of the disk for uses other than
+# instances.
+#
+# NOTE: This can be set per-compute, or if set to 0.0, the value
+# set on the scheduler node(s) or compute node(s) will be used and
+# defaulted to 1.0.
+#
+# NOTE: As of the 16.0.0 Pike release, this configuration option is
+# ignored
+# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
+#
+# Possible values:
+#
+# * Any valid positive integer or float value
+# (floating point value)
+# Minimum value: 0
+#disk_allocation_ratio = 0.0
+
+#
+# Console proxy host to be used to connect to instances on this host.
+# It is the
+# publicly visible name for the console host.
+#
+# Possible values:
+#
+# * Current hostname (default) or any string representing hostname.
+# (string value)
+#console_host = <current_hostname>
+
+#
+# Name of the network to be used to set access IPs for instances. If
+# there are
+# multiple IPs to choose from, an arbitrary one will be chosen.
+#
+# Possible values:
+#
+# * None (default)
+# * Any string representing network name.
+# (string value)
+#default_access_ip_network_name = <None>
+
+#
+# Whether to batch up the application of IPTables rules during a host
+# restart
+# and apply all at the end of the init phase.
+# (boolean value)
+#defer_iptables_apply = false
+
+#
+# Specifies where instances are stored on the hypervisor's disk.
+# It can point to locally attached storage or a directory on NFS.
+#
+# Possible values:
+#
+# * $state_path/instances where state_path is a config option that
+# specifies
+# the top-level directory for maintaining nova's state. (default) or
+# Any string representing directory path.
+# (string value)
+#instances_path =
+
+#
+# This option enables periodic compute.instance.exists notifications.
+# Each
+# compute node must be configured to generate system usage data. These
+# notifications are consumed by OpenStack Telemetry service.
+# (boolean value)
+#instance_usage_audit = false
+{% if controller.instance_usage_audit is defined %}
+instance_usage_audit = {{ controller.instance_usage_audit }}
+{%- endif %}
+
+#
+# Maximum number of 1 second retries in live_migration. It specifies
+# the number of retries against iptables when it complains, which
+# happens when a user continuously sends live-migration requests to
+# the same host, leading to concurrent iptables requests.
+#
+# Possible values:
+#
+# * Any positive integer representing retry count.
+# (integer value)
+# Minimum value: 0
+#live_migration_retry_count = 30
+
+#
+# This option specifies whether to start guests that were running
+# before the
+# host rebooted. It ensures that all of the instances on a Nova
+# compute node
+# resume their state each time the compute node boots or restarts.
+# (boolean value)
+#resume_guests_state_on_host_boot = {{ controller.get('resume_guests_state_on_host_boot', True) }}
+
+#
+# Number of times to retry network allocation. It is required to
+# attempt network
+# allocation retries if the virtual interface plug fails.
+#
+# Possible values:
+#
+# * Any positive integer representing retry count.
+# (integer value)
+# Minimum value: 0
+#network_allocate_retries = 0
+
+#
+# Limits the maximum number of instance builds to run concurrently by
+# nova-compute. The compute service can attempt to build an infinite
+# number of instances if asked to do so. This limit is enforced to
+# avoid building an unlimited number of instances concurrently on a
+# compute node. This value can be set per compute node.
+#
+# Possible Values:
+#
+# * 0 : treated as unlimited.
+# * Any positive integer representing maximum concurrent builds.
+# (integer value)
+# Minimum value: 0
+#max_concurrent_builds = 10
+
+#
+# Maximum number of live migrations to run concurrently. This limit is
+# enforced
+# to avoid outbound live migrations overwhelming the host/network and
+# causing
+# failures. It is not recommended that you change this unless you are
+# very sure
+# that doing so is safe and stable in your environment.
+#
+# Possible values:
+#
+# * 0 : treated as unlimited.
+# * Negative value defaults to 0.
+# * Any positive integer representing maximum number of live
+# migrations
+# to run concurrently.
+# (integer value)
+#max_concurrent_live_migrations = 1
+
+#
+# Number of times to retry block device allocation on failures.
+# Starting with
+# Liberty, Cinder can use image volume cache. This may help with block
+# device
+# allocation performance. Look at the cinder
+# image_volume_cache_enabled
+# configuration option.
+#
+# Possible values:
+#
+# * 60 (default)
+# * If value is 0, then one attempt is made.
+# * Any negative value is treated as 0.
+# * For any value > 0, total attempts are (value + 1)
+# (integer value)
+block_device_allocate_retries = {{ controller.get('block_device_allocate_retries', '600') }}
+
+#
+# Number of greenthreads available for use to sync power states.
+#
+# This option can be used to reduce the number of concurrent requests
+# made to the hypervisor or system with real instance power states
+# for performance reasons, for example, with Ironic.
+#
+# Possible values:
+#
+# * Any positive integer representing greenthreads count.
+# (integer value)
+#sync_power_state_pool_size = 1000
+
+#
+# Number of seconds to wait between runs of the image cache manager.
+#
+# Possible values:
+# * 0: run at the default rate.
+# * -1: disable
+# * Any other value
+# (integer value)
+# Minimum value: -1
+#image_cache_manager_interval = 0
+
+#
+# Interval to pull network bandwidth usage info.
+#
+# Not supported on all hypervisors. If a hypervisor doesn't support
+# bandwidth
+# usage, it will not get the info in the usage events.
+#
+# Possible values:
+#
+# * 0: Will run at the default periodic interval.
+# * Any value < 0: Disables the option.
+# * Any positive integer in seconds.
+# (integer value)
+#bandwidth_poll_interval = 600
+
+#
+# Interval to sync power states between the database and the
+# hypervisor.
+#
+# The interval that Nova checks the actual virtual machine power state
+# and the power state that Nova has in its database. If a user powers
+# down their VM, Nova updates the API to report the VM has been
+# powered down. Should something turn on the VM unexpectedly,
+# Nova will turn the VM back off to keep the system in the expected
+# state.
+#
+# Possible values:
+#
+# * 0: Will run at the default periodic interval.
+# * Any value < 0: Disables the option.
+# * Any positive integer in seconds.
+#
+# Related options:
+#
+# * If ``handle_virt_lifecycle_events`` in workarounds_group is
+# false and this option is negative, then instances that get out
+# of sync between the hypervisor and the Nova database will have
+# to be synchronized manually.
+# (integer value)
+#sync_power_state_interval = 600
+
+#
+# Interval between instance network information cache updates.
+#
+# Number of seconds after which each compute node runs the task of
+# querying Neutron for all of its instances' networking information,
+# then updates the Nova db with that information. Nova will never
+# update its cache if this option is set to 0. If we don't update the
+# cache, the metadata service and nova-api endpoints will be proxying
+# incorrect network data about the instance. So, it is not recommended
+# to set this option to 0.
+#
+# Possible values:
+#
+# * Any positive integer in seconds.
+# * Any value <=0 will disable the sync. This is not recommended.
+# (integer value)
+#heal_instance_info_cache_interval = 60
+
+#
+# Interval for reclaiming deleted instances.
+#
+# A value greater than 0 will enable SOFT_DELETE of instances.
+# This option decides whether the server to be deleted will be put
+# into
+# the SOFT_DELETED state. If this value is greater than 0, the deleted
+# server will not be deleted immediately, instead it will be put into
+# a queue until it's too old (deleted time greater than the value of
+# reclaim_instance_interval). The server can be recovered from the
+# delete queue by using the restore action. If the deleted server
+# remains
+# longer than the value of reclaim_instance_interval, it will be
+# deleted by a periodic task in the compute service automatically.
+#
+# Note that this option is read from both the API and compute nodes,
+# and
+# must be set globally otherwise servers could be put into a soft
+# deleted
+# state in the API and never actually reclaimed (deleted) on the
+# compute
+# node.
+#
+# Possible values:
+#
+# * Any positive integer (in seconds) greater than 0 will enable
+# this option.
+# * Any value <=0 will disable the option.
+# (integer value)
+#reclaim_instance_interval = 0
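+#
+# For example, to keep deleted servers restorable for one hour before
+# a periodic task reclaims them:
+#   reclaim_instance_interval = 3600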
+
+#
+# Interval for gathering volume usages.
+#
+# This option updates the volume usage cache for every
+# volume_usage_poll_interval number of seconds.
+#
+# Possible values:
+#
+# * Any positive integer (in seconds) greater than 0 will enable
+# this option.
+# * Any value <=0 will disable the option.
+# (integer value)
+#volume_usage_poll_interval = 0
+
+#
+# Interval for polling shelved instances to offload.
+#
+# The periodic task runs for every shelved_poll_interval number
+# of seconds and checks if there are any shelved instances. If it
+# finds a shelved instance, based on the 'shelved_offload_time' config
+# value it offloads the shelved instances. Check
+# 'shelved_offload_time'
+# config option description for details.
+#
+# Possible values:
+#
+# * Any value <= 0: Disables the option.
+# * Any positive integer in seconds.
+#
+# Related options:
+#
+# * ``shelved_offload_time``
+# (integer value)
+#shelved_poll_interval = 3600
+
+#
+# Time before a shelved instance is eligible for removal from a host.
+#
+# By default this option is set to 0 and the shelved instance will be
+# removed from the hypervisor immediately after shelve operation.
+# Otherwise, the instance will be kept for the value of
+# shelved_offload_time(in seconds) so that during the time period the
+# unshelve action will be faster, then the periodic task will remove
+# the instance from hypervisor after shelved_offload_time passes.
+#
+# Possible values:
+#
+# * 0: Instance will be immediately offloaded after being
+# shelved.
+# * Any value < 0: An instance will never offload.
+# * Any positive integer in seconds: The instance will exist for
+# the specified number of seconds before being offloaded.
+# (integer value)
+#shelved_offload_time = 0
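+#
+# For example, to poll hourly and offload instances that have been
+# shelved for more than one day:
+#   shelved_poll_interval = 3600
+#   shelved_offload_time = 86400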
+
+#
+# Interval for retrying failed instance file deletes.
+#
+# This option depends on 'maximum_instance_delete_attempts'.
+# This option specifies how often to retry deletes whereas
+# 'maximum_instance_delete_attempts' specifies the maximum number
+# of retry attempts that can be made.
+#
+# Possible values:
+#
+# * 0: Will run at the default periodic interval.
+# * Any value < 0: Disables the option.
+# * Any positive integer in seconds.
+#
+# Related options:
+#
+# * ``maximum_instance_delete_attempts`` from instance_cleaning_opts
+# group.
+# (integer value)
+#instance_delete_interval = 300
+
+#
+# Interval (in seconds) between block device allocation retries on
+# failures.
+#
+# This option allows the user to specify the time interval between
+# consecutive retries. 'block_device_allocate_retries' option
+# specifies
+# the maximum number of retries.
+#
+# Possible values:
+#
+# * 0: Disables the option.
+# * Any positive integer in seconds enables the option.
+#
+# Related options:
+#
+# * ``block_device_allocate_retries`` in compute_manager_opts group.
+# (integer value)
+# Minimum value: 0
+block_device_allocate_retries_interval = {{ controller.get('block_device_allocate_retries_interval', '10') }}
+
+#
+# Interval between sending the scheduler a list of current instance
+# UUIDs to
+# verify that its view of instances is in sync with nova.
+#
+# If the CONF option 'scheduler_tracks_instance_changes' is
+# False, the sync calls will not be made. So, changing this option
+# will
+# have no effect.
+#
+# If the out of sync situations are not very common, this interval
+# can be increased to lower the number of RPC messages being sent.
+# Likewise, if sync issues turn out to be a problem, the interval
+# can be lowered to check more frequently.
+#
+# Possible values:
+#
+# * 0: Will run at the default periodic interval.
+# * Any value < 0: Disables the option.
+# * Any positive integer in seconds.
+#
+# Related options:
+#
+# * This option has no impact if ``scheduler_tracks_instance_changes``
+# is set to False.
+# (integer value)
+#scheduler_instance_sync_interval = 120
+
+#
+# Interval for updating compute resources.
+#
+# This option specifies how often the update_available_resources
+# periodic task should run. A number less than 0 means to disable the
+# task completely. Leaving this at the default of 0 will cause this to
+# run at the default periodic interval. Setting it to any positive
+# value will cause it to run at approximately that number of seconds.
+#
+# Possible values:
+#
+# * 0: Will run at the default periodic interval.
+# * Any value < 0: Disables the option.
+# * Any positive integer in seconds.
+# (integer value)
+#update_resources_interval = 0
+
+#
+# Time interval after which an instance is hard rebooted
+# automatically.
+#
+# When doing a soft reboot, it is possible that a guest kernel is
+# completely hung in a way that causes the soft reboot task
+# to not ever finish. Setting this option to a time period in seconds
+# will automatically hard reboot an instance if it has been stuck
+# in a rebooting state longer than N seconds.
+#
+# Possible values:
+#
+# * 0: Disables the option (default).
+# * Any positive integer in seconds: Enables the option.
+# (integer value)
+# Minimum value: 0
+#reboot_timeout = 0
+
+#
+# Maximum time in seconds that an instance can take to build.
+#
+# If this timer expires, instance status will be changed to ERROR.
+# Enabling this option will make sure an instance will not be stuck
+# in BUILD state for a longer period.
+#
+# Possible values:
+#
+# * 0: Disables the option (default)
+# * Any positive integer in seconds: Enables the option.
+# (integer value)
+# Minimum value: 0
+#instance_build_timeout = 0
+
+#
+# Interval to wait before un-rescuing an instance stuck in RESCUE.
+#
+# Possible values:
+#
+# * 0: Disables the option (default)
+# * Any positive integer in seconds: Enables the option.
+# (integer value)
+# Minimum value: 0
+#rescue_timeout = 0
+
+#
+# Automatically confirm resizes after N seconds.
+#
+# Resize functionality will save the existing server before resizing.
+# After the resize completes, user is requested to confirm the resize.
+# The user has the opportunity to either confirm or revert all
+# changes. Confirm resize removes the original server and changes
+# server status from resized to active. Setting this option to a time
+# period (in seconds) will automatically confirm the resize if the
+# server is in resized state longer than that time.
+#
+# Possible values:
+#
+# * 0: Disables the option (default)
+# * Any positive integer in seconds: Enables the option.
+# (integer value)
+# Minimum value: 0
+#resize_confirm_window = 0
+
+#
+# Total time to wait in seconds for an instance to perform a clean
+# shutdown.
+#
+# It determines the overall period (in seconds) a VM is allowed to
+# perform a clean shutdown. While performing stop, rescue, shelve,
+# and rebuild operations, configuring this option gives the VM a
+# chance to perform a controlled shutdown before the instance is
+# powered off.
+# The default timeout is 60 seconds.
+#
+# The timeout value can be overridden on a per image basis by means
+# of os_shutdown_timeout that is an image metadata setting allowing
+# different types of operating systems to specify how much time they
+# need to shut down cleanly.
+#
+# Possible values:
+#
+# * Any positive integer in seconds (default value is 60).
+# (integer value)
+# Minimum value: 1
+#shutdown_timeout = 60
+
+#
+# The compute service periodically checks for instances that have been
+# deleted in the database but remain running on the compute node. The
+# above option enables action to be taken when such instances are
+# identified.
+#
+# Possible values:
+#
+# * reap: Powers down the instances and deletes them (default)
+# * log: Logs warning message about deletion of the resource
+# * shutdown: Powers down instances and marks them as non-
+# bootable which can be later used for debugging/analysis
+# * noop: Takes no action
+#
+# Related options:
+#
+# * running_deleted_instance_poll_interval
+# * running_deleted_instance_timeout
+# (string value)
+# Possible values:
+# noop - <No description provided>
+# log - <No description provided>
+# shutdown - <No description provided>
+# reap - <No description provided>
+#running_deleted_instance_action = reap
+
+#
+# Time interval in seconds to wait between runs of the clean up
+# action. If set to 0, the above check will be disabled. If
+# "running_deleted_instance_action" is set to "log" or "reap",
+# a value greater than 0 must be set.
+#
+# Possible values:
+#
+# * Any positive integer in seconds enables the option.
+# * 0: Disables the option.
+# * 1800: Default value.
+#
+# Related options:
+#
+# * running_deleted_instance_action
+# (integer value)
+#running_deleted_instance_poll_interval = 1800
+
+#
+# Time interval in seconds to wait for the instances that have
+# been marked as deleted in database to be eligible for cleanup.
+#
+# Possible values:
+#
+# * Any positive integer in seconds (default is 0).
+#
+# Related options:
+#
+# * "running_deleted_instance_action"
+# (integer value)
+#running_deleted_instance_timeout = 0
+
+#
+# The number of times to attempt to reap an instance's files.
+#
+# This option specifies the maximum number of retry attempts
+# that can be made.
+#
+# Possible values:
+#
+# * Any positive integer defines how many attempts are made.
+# * Any value <=0 means no delete attempts occur, but you should use
+# ``instance_delete_interval`` to disable the delete attempts.
+#
+# Related options:
+# * ``instance_delete_interval`` in interval_opts group can be used to
+# disable
+# this option.
+# (integer value)
+#maximum_instance_delete_attempts = 5
+
+#
+# Sets the scope of the check for unique instance names.
+#
+# The default doesn't check for unique names. If a scope for the name
+# check is
+# set, a launch of a new instance or an update of an existing instance
+# with a
+# duplicate name will result in an ''InstanceExists'' error. The
+# uniqueness is
+# case-insensitive. Setting this option can increase the usability for
+# end
+# users as they don't have to distinguish among instances with the
+# same name
+# by their IDs.
+#
+# Possible values:
+#
+# * '': An empty value means that no uniqueness check is done and
+# duplicate
+# names are possible.
+# * "project": The instance name check is done only for instances
+# within the
+# same project.
+# * "global": The instance name check is done for all instances
+# regardless of
+# the project.
+# (string value)
+# Possible values:
+# '' - <No description provided>
+# project - <No description provided>
+# global - <No description provided>
+#osapi_compute_unique_server_name_scope =
+
+#
+# Enable new nova-compute services on this host automatically.
+#
+# When a new nova-compute service starts up, it gets
+# registered in the database as an enabled service. Sometimes it can
+# be useful to register new compute services in a disabled state and
+# then enable them at a later point in time. This option only sets
+# this behavior for nova-compute services; it does not auto-disable
+# other services like nova-conductor, nova-scheduler,
+# nova-consoleauth, or nova-osapi_compute.
+#
+# Possible values:
+#
+# * ``True``: Each new compute service is enabled as soon as it
+# registers itself.
+# * ``False``: Compute services must be enabled via an os-services
+# REST API call
+# or with the CLI with ``nova service-enable <hostname> <binary>``,
+# otherwise
+# they are not ready to use.
+# (boolean value)
+#enable_new_services = true
+
+#
+# Template string to be used to generate instance names.
+#
+# This template controls the creation of the database name of an
+# instance. This
+# is *not* the display name you enter when creating an instance (via
+# Horizon
+# or CLI). For a new deployment it is advisable to change the default
+# value
+# (which uses the database autoincrement) to another value which makes
+# use
+# of the attributes of an instance, like ``instance-%(uuid)s``. If you
+# already have instances in your deployment when you change this, your
+# deployment will break.
+#
+# Possible values:
+#
+# * A string which either uses the instance database ID (like the
+# default)
+# * A string with a list of named database columns, for example
+# ``%(id)d``
+# or ``%(uuid)s`` or ``%(hostname)s``.
+#
+# Related options:
+#
+# * not to be confused with: ``multi_instance_display_name_template``
+# (string value)
+#instance_name_template = instance-%08x
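+#
+# For example, to key database instance names on the UUID instead of
+# the autoincrement ID (advisable only for new deployments):
+#   instance_name_template = instance-%(uuid)s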
+
+#
+# Number of times to retry live-migration before failing.
+#
+# Possible values:
+#
+# * If == -1, try until out of hosts (default)
+# * If == 0, only try once, no retries
+# * Integer greater than 0
+# (integer value)
+# Minimum value: -1
+#migrate_max_retries = -1
+
+#
+# Configuration drive format
+#
+# Configuration drive format that will contain metadata attached to
+# the
+# instance when it boots.
+#
+# Possible values:
+#
+# * iso9660: A file system image standard that is widely supported
+# across
+# operating systems. NOTE: Mind the libvirt bug
+# (https://bugs.launchpad.net/nova/+bug/1246201) - If your
+# hypervisor
+#   driver is libvirt, and you want live migration to work without
+# shared storage,
+# then use VFAT.
+# * vfat: For legacy reasons, you can configure the configuration
+# drive to
+# use VFAT format instead of ISO 9660.
+#
+# Related options:
+#
+# * This option is meaningful when one of the following alternatives
+# occur:
+# 1. force_config_drive option set to 'true'
+# 2. the REST API call to create the instance contains an enable
+# flag for
+# config drive option
+# 3. the image used to create the instance requires a config drive,
+# this is defined by img_config_drive property for that image.
+# * A compute node running Hyper-V hypervisor can be configured to
+# attach
+# configuration drive as a CD drive. To attach the configuration
+# drive as a CD
+# drive, set config_drive_cdrom option at hyperv section, to true.
+# (string value)
+# Possible values:
+# iso9660 - <No description provided>
+# vfat - <No description provided>
+#config_drive_format = iso9660
+
+#
+# Force injection to take place on a config drive
+#
+# When this option is set to true, configuration drive functionality
+# will be forcibly enabled by default; otherwise the user can still
+# enable configuration drives via the REST API or image metadata
+# properties.
+#
+# Possible values:
+#
+# * True: Force to use of configuration drive regardless the user's
+# input in the
+# REST API call.
+# * False: Do not force use of configuration drive. Config drives can
+# still be
+# enabled via the REST API or image metadata properties.
+#
+# Related options:
+#
+# * Use the 'mkisofs_cmd' flag to set the path where you install the
+#   genisoimage program. If genisoimage is in the same path as the
+# nova-compute service, you do not need to set this flag.
+# * To use configuration drive with Hyper-V, you must set the
+# 'mkisofs_cmd' value to the full path to an mkisofs.exe
+# installation.
+# Additionally, you must set the qemu_img_cmd value in the hyperv
+#   configuration section to the full path to a qemu-img command
+# installation.
+# (boolean value)
+#force_config_drive = false
+
+#
+# Name or path of the tool used for ISO image creation
+#
+# Use the mkisofs_cmd flag to set the path where you install the
+# genisoimage program. If genisoimage is on the system path, you do
+# not need to change the default value.
+#
+# To use configuration drive with Hyper-V, you must set the
+# mkisofs_cmd value to the full path to an mkisofs.exe installation.
+# Additionally, you must set the qemu_img_cmd value in the hyperv
+# configuration section to the full path to a qemu-img command
+# installation.
+#
+# Possible values:
+#
+# * Name of the ISO image creator program, in case it is in the same
+# directory
+# as the nova-compute service
+# * Path to ISO image creator program
+#
+# Related options:
+#
+# * This option is meaningful when config drives are enabled.
+# * To use configuration drive with Hyper-V, you must set the
+# qemu_img_cmd value in the hyperv configuration section to the full
+# path to a qemu-img command installation.
+# (string value)
+#mkisofs_cmd = genisoimage
+
+# DEPRECATED: The driver to use for database access (string value)
+# This option is deprecated for removal since 13.0.0.
+# Its value may be silently ignored in the future.
+#db_driver = nova.db
+
+# DEPRECATED:
+# Default flavor to use for the EC2 API only.
+# The Nova API does not support a default flavor.
+# (string value)
+# This option is deprecated for removal since 14.0.0.
+# Its value may be silently ignored in the future.
+# Reason: The EC2 API is deprecated.
+#default_flavor = m1.small
+
+#
+# The IP address which the host is using to connect to the management
+# network.
+#
+# Possible values:
+#
+# * String with valid IP address. Default is IPv4 address of this
+# host.
+#
+# Related options:
+#
+# * metadata_host
+# * my_block_storage_ip
+# * routing_source_ip
+# * vpn_ip
+# (string value)
+#my_ip = <host_ipv4>
+my_ip={{ controller.bind.private_address }}
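+# The value above is rendered from the formula pillar; for example,
+# with an illustrative pillar entry of
+# nova:controller:bind:private_address: 10.0.0.10, the line renders
+# as my_ip=10.0.0.10.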
+
+#
+# The IP address which is used to connect to the block storage
+# network.
+#
+# Possible values:
+#
+# * String with valid IP address. Default is IP address of this host.
+#
+# Related options:
+#
+# * my_ip - if my_block_storage_ip is not set, then my_ip value is
+# used.
+# (string value)
+#my_block_storage_ip = $my_ip
+
+#
+# Hostname, FQDN or IP address of this host.
+#
+# Used as:
+#
+# * the oslo.messaging queue name for nova-compute worker
+# * we use this value for the binding_host sent to neutron. This means
+# if you use
+# a neutron agent, it should have the same value for host.
+# * cinder host attachment information
+#
+# The value must be valid within an AMQP key.
+#
+# Possible values:
+#
+# * String with hostname, FQDN or IP address. Default is hostname of
+# this host.
+# (string value)
+#host = <current_hostname>
+{%- if controller.host is defined %}
+host={{ controller.host }}
+{%- endif %}
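+# The host override above is emitted only when the pillar defines it;
+# for example, an illustrative pillar entry of nova:controller:host:
+# ctl01 renders as host=ctl01, while an unset key leaves the
+# commented default hostname behaviour in effect.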
+
+# DEPRECATED:
+# This option is a list of full paths to one or more configuration
+# files for dhcpbridge. In most cases the default path of
+# '/etc/nova/nova-dhcpbridge.conf' should be sufficient, but if you
+# have special needs for configuring dhcpbridge, you can change or
+# add to this list.
+#
+# Possible values:
+#
+# * A list of strings, where each string is the full path to a
+# dhcpbridge
+# configuration file.
+# (multi valued)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#dhcpbridge_flagfile = /etc/nova/nova.conf
+
+# DEPRECATED:
+# The location where the network configuration files will be kept. The
+# default is
+# the 'networks' directory off of the location where nova's Python
+# module is
+# installed.
+#
+# Possible values:
+#
+# * A string containing the full path to the desired configuration
+# directory
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#networks_path = $state_path/networks
+
+# DEPRECATED:
+# This is the name of the network interface for public IP addresses.
+# The default
+# is 'eth0'.
+#
+# Possible values:
+#
+# * Any string representing a network interface name
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#public_interface = eth0
+
+# DEPRECATED:
+# The location of the binary nova-dhcpbridge. By default it is the
+# binary named
+# 'nova-dhcpbridge' that is installed with all the other nova
+# binaries.
+#
+# Possible values:
+#
+# * Any string representing the full path to the binary for dhcpbridge
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#dhcpbridge = $bindir/nova-dhcpbridge
+
+# DEPRECATED:
+# The public IP address of the network host.
+#
+# This is used when creating an SNAT rule.
+#
+# Possible values:
+#
+# * Any valid IP address
+#
+# Related options:
+#
+# * ``force_snat_range``
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#routing_source_ip = $my_ip
+
+# DEPRECATED:
+# The lifetime of a DHCP lease, in seconds. The default is 86400 (one
+# day).
+#
+# Possible values:
+#
+# * Any positive integer value.
+# (integer value)
+# Minimum value: 1
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#dhcp_lease_time = 86400
+
+# DEPRECATED:
+# Despite the singular form of the name of this option, it is actually
+# a list of
+# zero or more server addresses that dnsmasq will use for DNS
+# nameservers. If
+# this is not empty, dnsmasq will not read /etc/resolv.conf, but will
+# only use
+# the servers specified in this option. If the option
+# use_network_dns_servers is
+# True, the dns1 and dns2 servers from the network will be appended to
+# this list,
+# and will be used as DNS servers, too.
+#
+# Possible values:
+#
+# * A list of strings, where each string is either an IP address or a
+# FQDN.
+#
+# Related options:
+#
+# * ``use_network_dns_servers``
+# (multi valued)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#dns_server =
+
+# DEPRECATED:
+# When this option is set to True, the dns1 and dns2 servers for the
+# network
+# specified by the user on boot will be used for DNS, as well as any
+# specified in
+# the `dns_server` option.
+#
+# Related options:
+#
+# * ``dns_server``
+# (boolean value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#use_network_dns_servers = false
+
+# DEPRECATED:
+# This option is a list of zero or more IP address ranges in your
+# network's DMZ
+# that should be accepted.
+#
+# Possible values:
+#
+# * A list of strings, each of which should be a valid CIDR.
+# (list value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#dmz_cidr =
+
+# DEPRECATED:
+# This is a list of zero or more IP ranges that traffic from the
+# `routing_source_ip` will be SNATted to. If the list is empty, then
+# no SNAT
+# rules are created.
+#
+# Possible values:
+#
+# * A list of strings, each of which should be a valid CIDR.
+#
+# Related options:
+#
+# * ``routing_source_ip``
+# (multi valued)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#force_snat_range =
+
+# DEPRECATED:
+# The path to the custom dnsmasq configuration file, if any.
+#
+# Possible values:
+#
+# * The full path to the configuration file, or an empty string if
+# there is no
+# custom dnsmasq configuration file.
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#dnsmasq_config_file =
+
+# DEPRECATED:
+# This is the class used as the ethernet device driver for linuxnet
+# bridge operations. The default value should be all you need for
+# most cases, but if you wish to use a customized class, set this
+# option to the full dot-separated import path for that class.
+#
+# Possible values:
+#
+# * Any string representing a dot-separated class path that Nova can
+# import.
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#linuxnet_interface_driver = nova.network.linux_net.LinuxBridgeInterfaceDriver
+
+# DEPRECATED:
+# The name of the Open vSwitch bridge that is used with linuxnet when
+# connecting with Open vSwitch.
+#
+# Possible values:
+#
+# * Any string representing a valid bridge name.
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#linuxnet_ovs_integration_bridge = br-int
+
+#
+# When True, when a device starts up, and upon binding floating IP
+# addresses, arp
+# messages will be sent to ensure that the arp caches on the compute
+# hosts are
+# up-to-date.
+#
+# Related options:
+#
+# * ``send_arp_for_ha_count``
+# (boolean value)
+#send_arp_for_ha = false
+
+#
+# When arp messages are configured to be sent, they will be sent with
+# the count
+# set to the value of this option. Of course, if this is set to zero,
+# no arp
+# messages will be sent.
+#
+# Possible values:
+#
+# * Any integer greater than or equal to 0
+#
+# Related options:
+#
+# * ``send_arp_for_ha``
+# (integer value)
+#send_arp_for_ha_count = 3
+
+# DEPRECATED:
+# When set to True, only the first NIC of a VM will get its default
+# gateway from the DHCP server.
+# (boolean value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#use_single_default_gateway = false
+
+# DEPRECATED:
+# One or more interfaces that bridges can forward traffic to. If any
+# of the items
+# in this list is the special keyword 'all', then all traffic will be
+# forwarded.
+#
+# Possible values:
+#
+# * A list of zero or more interface names, or the word 'all'.
+# (multi valued)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#forward_bridge_interface = all
+
+#
+# This option determines the IP address for the network metadata API
+# server.
+#
+# This is really the client side of the metadata host equation, which
+# allows nova-network to find the metadata server when running in the
+# default multi-host networking mode.
+#
+# Possible values:
+#
+# * Any valid IP address. The default is the address of the Nova API
+# server.
+#
+# Related options:
+#
+# * ``metadata_port``
+# (string value)
+#metadata_host = $my_ip
+
+# DEPRECATED:
+# This option determines the port used for the metadata API server.
+#
+# Related options:
+#
+# * ``metadata_host``
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#metadata_port = 8775
+
+# DEPRECATED:
+# This expression, if defined, will select any matching iptables rules
+# and place
+# them at the top when applying metadata changes to the rules.
+#
+# Possible values:
+#
+# * Any string representing a valid regular expression, or an empty
+# string
+#
+# Related options:
+#
+# * ``iptables_bottom_regex``
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#iptables_top_regex =
+
+# DEPRECATED:
+# This expression, if defined, will select any matching iptables rules
+# and place
+# them at the bottom when applying metadata changes to the rules.
+#
+# Possible values:
+#
+# * Any string representing a valid regular expression, or an empty
+# string
+#
+# Related options:
+#
+# * iptables_top_regex
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#iptables_bottom_regex =
+
+# DEPRECATED:
+# By default, packets that do not pass the firewall are DROPped. In
+# many cases,
+# though, an operator may find it more useful to change this from DROP
+# to REJECT,
+# so that the user issuing those packets may have a better idea as to
+# what's
+# going on, or LOGDROP in order to record the blocked traffic before
+# DROPping.
+#
+# Possible values:
+#
+# * A string representing an iptables chain. The default is DROP.
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#iptables_drop_action = DROP
+
+# DEPRECATED:
+# This option represents the period of time, in seconds, that the
+# ovs_vsctl calls
+# will wait for a response from the database before timing out. A
+# setting of 0
+# means that the utility should wait forever for a response.
+#
+# Possible values:
+#
+# * Any positive integer if a limited timeout is desired, or zero if
+# the calls
+# should wait forever for a response.
+# (integer value)
+# Minimum value: 0
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ovs_vsctl_timeout = 120
+
+# DEPRECATED:
+# This option is used mainly in testing to avoid calls to the
+# underlying network
+# utilities.
+# (boolean value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#fake_network = false
+
+# DEPRECATED:
+# This option determines the number of times to retry ebtables
+# commands before
+# giving up. The minimum number of retries is 1.
+#
+# Possible values:
+#
+# * Any positive integer
+#
+# Related options:
+#
+# * ``ebtables_retry_interval``
+# (integer value)
+# Minimum value: 1
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ebtables_exec_attempts = 3
+
+# DEPRECATED:
+# This option determines the time, in seconds, that the system will
+# sleep in
+# between ebtables retries. Note that each successive retry waits a
+# multiple of
+# this value, so for example, if this is set to the default of 1.0
+# seconds, and
+# ebtables_exec_attempts is 4, after the first failure, the system
+# will sleep for
+# 1 * 1.0 seconds, after the second failure it will sleep 2 * 1.0
+# seconds, and
+# after the third failure it will sleep 3 * 1.0 seconds.
+#
+# Possible values:
+#
+# * Any non-negative float or integer. Setting this to zero will
+# result in no
+# waiting between attempts.
+#
+# Related options:
+#
+# * ebtables_exec_attempts
+# (floating point value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ebtables_retry_interval = 1.0
+
+# DEPRECATED:
+# Enable neutron as the backend for networking.
+#
+# Determine whether to use Neutron or Nova Network as the back end.
+# Set to true
+# to use neutron.
+# (boolean value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+use_neutron = true
+
+#
+# This option determines whether the network setup information is
+# injected into the VM before it is booted. While it was originally
+# designed to be used only by nova-network, it is also used by the
+# vmware and xenapi virt drivers to control whether network
+# information is injected into a VM. The libvirt virt driver also
+# uses it when config_drive is used to configure the network, again
+# to control whether network information is injected into the VM.
+# (boolean value)
+#flat_injected = false
+
+# DEPRECATED:
+# This option determines the bridge used for simple network interfaces
+# when no
+# bridge is specified in the VM creation request.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment.
+#
+# Possible values:
+#
+# * Any string representing a valid network bridge, such as 'br100'
+#
+# Related options:
+#
+# * ``use_neutron``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#flat_network_bridge = <None>
+
+# DEPRECATED:
+# This is the address of the DNS server for a simple network. If this
+# option is
+# not specified, the default of '8.8.4.4' is used.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment.
+#
+# Possible values:
+#
+# * Any valid IP address.
+#
+# Related options:
+#
+# * ``use_neutron``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#flat_network_dns = 8.8.4.4
+
+# DEPRECATED:
+# This option is the name of the virtual interface of the VM on which
+# the bridge
+# will be built. While it was originally designed to be used only by
+# nova-network, it is also used by libvirt for the bridge interface
+# name.
+#
+# Possible values:
+#
+# * Any valid virtual interface name, such as 'eth0'
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#flat_interface = <None>
+
+# DEPRECATED:
+# This is the VLAN number used for private networks. Note that when
+# creating the networks, if the specified number has already been
+# assigned, nova-network will increment this number until it finds an
+# available VLAN.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment. It also will be ignored if the
+# configuration option
+# for `network_manager` is not set to the default of
+# 'nova.network.manager.VlanManager'.
+#
+# Possible values:
+#
+# * Any integer between 1 and 4094. Values outside of that range will
+# raise a
+# ValueError exception.
+#
+# Related options:
+#
+# * ``network_manager``
+# * ``use_neutron``
+# (integer value)
+# Minimum value: 1
+# Maximum value: 4094
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#vlan_start = 100
+
+# DEPRECATED:
+# This option is the name of the virtual interface of the VM on which
+# the VLAN
+# bridge will be built. While it was originally designed to be used
+# only by
+# nova-network, it is also used by libvirt and xenapi for the bridge
+# interface
+# name.
+#
+# Please note that this setting will be ignored in nova-network if the
+# configuration option for `network_manager` is not set to the default
+# of
+# 'nova.network.manager.VlanManager'.
+#
+# Possible values:
+#
+# * Any valid virtual interface name, such as 'eth0'
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options. While
+# this option has an effect when using neutron, it incorrectly
+# overrides the value provided by neutron and should therefore not be
+# used.
+#vlan_interface = <None>
+
+# DEPRECATED:
+# This option represents the number of networks to create if not
+# explicitly
+# specified when the network is created. The only time this is used is
+# if a CIDR
+# is specified, but an explicit network_size is not. In that case, the
+# subnets
+# are created by dividing the IP address space of the CIDR by
+# num_networks. The
+# resulting subnet sizes cannot be larger than the configuration
+# option
+# `network_size`; in that event, they are reduced to `network_size`,
+# and a
+# warning is logged.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment.
+#
+# Possible values:
+#
+# * Any positive integer is technically valid, although there are
+# practical
+# limits based upon available IP address space and virtual
+# interfaces.
+#
+# Related options:
+#
+# * ``use_neutron``
+# * ``network_size``
+# (integer value)
+# Minimum value: 1
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#num_networks = 1
+
+# DEPRECATED:
+# This option is no longer used since the /os-cloudpipe API was
+# removed in the
+# 16.0.0 Pike release. This is the public IP address for the cloudpipe
+# VPN
+# servers. It defaults to the IP address of the host.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment. It also will be ignored if the
+# configuration option
+# for `network_manager` is not set to the default of
+# 'nova.network.manager.VlanManager'.
+#
+# Possible values:
+#
+# * Any valid IP address. The default is ``$my_ip``, the IP address
+# of the host.
+#
+# Related options:
+#
+# * ``network_manager``
+# * ``use_neutron``
+# * ``vpn_start``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#vpn_ip = $my_ip
+
+# DEPRECATED:
+# This is the port number to use as the first VPN port for private
+# networks.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment. It also will be ignored if the
+# configuration option
+# for `network_manager` is not set to the default of
+# 'nova.network.manager.VlanManager', or if you specify a value the
+# 'vpn_start'
+# parameter when creating a network.
+#
+# Possible values:
+#
+# * Any integer representing a valid port number. The default is 1000.
+#
+# Related options:
+#
+# * ``use_neutron``
+# * ``vpn_ip``
+# * ``network_manager``
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#vpn_start = 1000
+
+# DEPRECATED:
+# This option determines the number of addresses in each private
+# subnet.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment.
+#
+# Possible values:
+#
+# * Any positive integer that is less than or equal to the available
+# network
+# size. Note that if you are creating multiple networks, they must
+# all fit in
+# the available IP address space. The default is 256.
+#
+# Related options:
+#
+# * ``use_neutron``
+# * ``num_networks``
+# (integer value)
+# Minimum value: 1
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#network_size = 256
+
+# DEPRECATED:
+# This option determines the fixed IPv6 address block when creating a
+# network.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment.
+#
+# Possible values:
+#
+# * Any valid IPv6 CIDR
+#
+# Related options:
+#
+# * ``use_neutron``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#fixed_range_v6 = fd00::/48
+
+# DEPRECATED:
+# This is the default IPv4 gateway. It is used only in the testing
+# suite.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment.
+#
+# Possible values:
+#
+# * Any valid IP address.
+#
+# Related options:
+#
+# * ``use_neutron``
+# * ``gateway_v6``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#gateway = <None>
+
+# DEPRECATED:
+# This is the default IPv6 gateway. It is used only in the testing
+# suite.
+#
+# Please note that this option is only used when using nova-network
+# instead of
+# Neutron in your deployment.
+#
+# Possible values:
+#
+# * Any valid IP address.
+#
+# Related options:
+#
+# * ``use_neutron``
+# * ``gateway``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#gateway_v6 = <None>
+
+# DEPRECATED:
+# This option represents the number of IP addresses to reserve at the
+# top of the
+# address range for VPN clients. It also will be ignored if the
+# configuration
+# option for `network_manager` is not set to the default of
+# 'nova.network.manager.VlanManager'.
+#
+# Possible values:
+#
+# * Any integer, 0 or greater.
+#
+# Related options:
+#
+# * ``use_neutron``
+# * ``network_manager``
+# (integer value)
+# Minimum value: 0
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#cnt_vpn_clients = 0
+
+# DEPRECATED:
+# This is the number of seconds to wait before disassociating a
+# deallocated fixed
+# IP address. This is only used with the nova-network service, and has
+# no effect
+# when using neutron for networking.
+#
+# Possible values:
+#
+# * Any integer, zero or greater.
+#
+# Related options:
+#
+# * ``use_neutron``
+# (integer value)
+# Minimum value: 0
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#fixed_ip_disassociate_timeout = 600
+
+# DEPRECATED:
+# This option determines how many times nova-network will attempt to
+# create a
+# unique MAC address before giving up and raising a
+# `VirtualInterfaceMacAddressException` error.
+#
+# Possible values:
+#
+# * Any positive integer. The default is 5.
+#
+# Related options:
+#
+# * ``use_neutron``
+# (integer value)
+# Minimum value: 1
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#create_unique_mac_address_attempts = 5
+
+# DEPRECATED:
+# Determines whether unused gateway devices, both VLAN and bridge, are
+# deleted if
+# the network is in nova-network VLAN mode and is multi-hosted.
+#
+# Related options:
+#
+# * ``use_neutron``
+# * ``vpn_ip``
+# * ``fake_network``
+# (boolean value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#teardown_unused_network_gateway = false
+
+# DEPRECATED:
+# When this option is True, a call is made to release the DHCP lease
+# for the instance when that instance is terminated.
+#
+# Related options:
+#
+# * ``use_neutron``
+# (boolean value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+force_dhcp_release = {{ controller.get('force_dhcp_release', 'true') }}
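+# The rendering above uses a Jinja .get() with a fallback, so an
+# unset pillar key yields force_dhcp_release = true; an illustrative
+# pillar entry of nova:controller:force_dhcp_release: false would
+# render as force_dhcp_release = false.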
+
+# DEPRECATED:
+# When this option is True, whenever a DNS entry must be updated, a
+# fanout cast
+# message is sent to all network hosts to update their DNS entries in
+# multi-host
+# mode.
+#
+# Related options:
+#
+# * ``use_neutron``
+# (boolean value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#update_dns_entries = false
+
+# DEPRECATED:
+# This option determines the time, in seconds, to wait between
+# refreshing DNS
+# entries for the network.
+#
+# Possible values:
+#
+# * A positive integer
+# * -1 to disable updates
+#
+# Related options:
+#
+# * ``use_neutron``
+# (integer value)
+# Minimum value: -1
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#dns_update_periodic_interval = -1
+
+# DEPRECATED:
+# This option allows you to specify the domain for the DHCP server.
+#
+# Possible values:
+#
+# * Any string that is a valid domain name.
+#
+# Related options:
+#
+# * ``use_neutron``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#dhcp_domain = novalocal
+dhcp_domain={{ controller.get('dhcp_domain', 'novalocal') }}
+
+# DEPRECATED:
+# This option allows you to specify the L3 management library to be
+# used.
+#
+# Possible values:
+#
+# * Any dot-separated string that represents the import path to an L3
+# networking
+# library.
+#
+# Related options:
+#
+# * ``use_neutron``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#l3_lib = nova.network.l3.LinuxNetL3
+
+# DEPRECATED:
+# THIS VALUE SHOULD BE SET WHEN CREATING THE NETWORK.
+#
+# If True in multi_host mode, all compute hosts share the same dhcp
+# address. The
+# same IP address used for DHCP will be added on each nova-network
+# node which is
+# only visible to the VMs on the same host.
+#
+# The use of this configuration has been deprecated and may be removed
+# in any
+# release after Mitaka. It is recommended that instead of relying on
+# this option,
+# an explicit value should be passed to 'create_networks()' as a
+# keyword argument
+# with the name 'share_address'.
+# (boolean value)
+# This option is deprecated for removal since 2014.2.
+# Its value may be silently ignored in the future.
+#share_dhcp_address = false
+
+# DEPRECATED:
+# URL for LDAP server which will store DNS entries
+#
+# Possible values:
+#
+# * A valid LDAP URL representing the server
+# (uri value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_url = ldap://ldap.example.com:389
+
+# DEPRECATED: Bind user for LDAP server (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_user = uid=admin,ou=people,dc=example,dc=org
+
+# DEPRECATED: Bind user's password for LDAP server (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_password = password
+
+# DEPRECATED:
+# Hostmaster for LDAP DNS driver Statement of Authority
+#
+# Possible values:
+#
+# * Any valid string representing LDAP DNS hostmaster.
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_soa_hostmaster = hostmaster@example.org
+
+# DEPRECATED:
+# DNS Servers for LDAP DNS driver
+#
+# Possible values:
+#
+# * A valid URL representing a DNS server
+# (multi valued)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_servers = dns.example.org
+
+# DEPRECATED:
+# Base distinguished name for the LDAP search query
+#
+# This option helps to decide where to look up the host in LDAP.
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_base_dn = ou=hosts,dc=example,dc=org
+
+# DEPRECATED:
+# Refresh interval (in seconds) for LDAP DNS driver Start of Authority
+#
+# Time interval that a secondary/slave DNS server waits before
+# requesting the primary DNS server's current SOA record. If the
+# records differ, the secondary DNS server will request a zone
+# transfer from the primary.
+#
+# NOTE: Lower values cause more traffic.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_soa_refresh = 1800
+
+# DEPRECATED:
+# Retry interval (in seconds) for LDAP DNS driver Start of Authority
+#
+# Time interval that a secondary/slave DNS server should wait if an
+# attempt to transfer the zone failed during the previous refresh
+# interval.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_soa_retry = 3600
+
+# DEPRECATED:
+# Expiry interval (in seconds) for LDAP DNS driver Start of Authority
+#
+# Time interval for which a secondary/slave DNS server holds the
+# information before it is no longer considered authoritative.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_soa_expiry = 86400
+
+# DEPRECATED:
+# Minimum interval (in seconds) for LDAP DNS driver Start of Authority
+#
+# This is the minimum time-to-live that applies to all resource
+# records in the zone file. This value tells other servers how long
+# they should keep the data in cache.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ldap_dns_soa_minimum = 7200
+
+# DEPRECATED:
+# Default value for multi_host in networks.
+#
+# The nova-network service can operate in a multi-host or single-host
+# mode. In multi-host mode each compute node runs a copy of
+# nova-network and the instances on that compute node use the compute
+# node as a gateway to the Internet. Whereas in single-host mode, a
+# central server runs the nova-network service. All compute nodes
+# forward traffic from the instances to the cloud controller which
+# then forwards traffic to the Internet.
+#
+# If this option is set to true, some RPC network calls will be sent
+# directly to the host.
+#
+# Note that this option is only used when using nova-network instead
+# of
+# Neutron in your deployment.
+#
+# Related options:
+#
+# * ``use_neutron``
+# (boolean value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#multi_host = false
+
+# DEPRECATED:
+# Driver to use for network creation.
+#
+# The network driver initializes (creates bridges and so on) only
+# when the first VM lands on a host node. All network managers
+# configure the network using network drivers. The driver is not tied
+# to any particular network manager.
+#
+# The default Linux driver implements vlans, bridges, and iptables
+# rules
+# using linux utilities.
+#
+# Note that this option is only used when using nova-network instead
+# of Neutron in your deployment.
+#
+# Related options:
+#
+# * ``use_neutron``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#network_driver = nova.network.linux_net
+
+# DEPRECATED:
+# Firewall driver to use with ``nova-network`` service.
+#
+# This option only applies when using the ``nova-network`` service.
+# When using another networking service, such as Neutron, this should
+# be set to the ``nova.virt.firewall.NoopFirewallDriver``.
+#
+# Possible values:
+#
+# * ``nova.virt.firewall.IptablesFirewallDriver``
+# * ``nova.virt.firewall.NoopFirewallDriver``
+# * ``nova.virt.libvirt.firewall.IptablesFirewallDriver``
+# * [...]
+#
+# Related options:
+#
+# * ``use_neutron``: This must be set to ``False`` to enable ``nova-
+# network``
+# networking
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+
+# DEPRECATED:
+# Determine whether to allow network traffic from same network.
+#
+# When set to true, hosts on the same subnet are not filtered and are
+# allowed
+# to pass all types of traffic between them. On a flat network, this
+# allows
+# all instances from all projects unfiltered communication. With VLAN
+# networking, this allows access between instances within the same
+# project.
+#
+# This option only applies when using the ``nova-network`` service.
+# When using another networking service, such as Neutron, security
+# groups or other approaches should be used.
+#
+# Possible values:
+#
+# * True: Network traffic should be allowed to pass between all
+# instances on the same network, regardless of their tenant and
+# security policies
+# * False: Network traffic should not be allowed to pass between
+# instances unless it is unblocked in a security group
+#
+# Related options:
+#
+# * ``use_neutron``: This must be set to ``False`` to enable ``nova-
+# network``
+# networking
+# * ``firewall_driver``: This must be set to
+# ``nova.virt.libvirt.firewall.IptablesFirewallDriver`` to ensure
+# the
+# libvirt firewall driver is enabled.
+# (boolean value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#allow_same_net_traffic = true
+
+# DEPRECATED:
+# Default pool for floating IPs.
+#
+# This option specifies the default floating IP pool for allocating
+# floating IPs.
+#
+# While allocating a floating ip, users can optionally pass in the
+# name of the
+# pool they want to allocate from, otherwise it will be pulled from
+# the
+# default pool.
+#
+# If this option is not set, then 'nova' is used as default floating
+# pool.
+#
+# Possible values:
+#
+# * Any string representing a floating IP pool name
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# This option was used for two purposes: to set the floating IP pool
+# name for
+# nova-network and to do the same for neutron. nova-network is
+# deprecated, as are
+# any related configuration options. Users of neutron, meanwhile,
+# should use the
+# 'default_floating_pool' option in the '[neutron]' group.
+#default_floating_pool = nova
+
+# DEPRECATED:
+# Auto-assigning floating IP to VM
+#
+# When set to True, a floating IP is automatically allocated and
+# associated with the VM upon creation.
+#
+# Related options:
+#
+# * use_neutron: this option only works with nova-network.
+# (boolean value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#auto_assign_floating_ip = false
+
+# DEPRECATED:
+# Full class name for the DNS Manager for floating IPs.
+#
+# This option specifies the class of the driver that provides
+# functionality
+# to manage DNS entries associated with floating IPs.
+#
+# When a user adds a DNS entry for a specified domain to a floating
+# IP,
+# nova will add a DNS entry using the specified floating DNS driver.
+# When a floating IP is deallocated, its DNS entry will automatically
+# be deleted.
+#
+# Possible values:
+#
+# * Full Python path to the class to be used
+#
+# Related options:
+#
+# * use_neutron: this option only works with nova-network.
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#floating_ip_dns_manager = nova.network.noop_dns_driver.NoopDNSDriver
+
+# DEPRECATED:
+# Full class name for the DNS Manager for instance IPs.
+#
+# This option specifies the class of the driver that provides
+# functionality
+# to manage DNS entries for instances.
+#
+# On instance creation, nova will add DNS entries for the instance
+# name and
+# id, using the specified instance DNS driver and domain. On instance
+# deletion,
+# nova will remove the DNS entries.
+#
+# Possible values:
+#
+# * Full Python path to the class to be used
+#
+# Related options:
+#
+# * use_neutron: this option only works with nova-network.
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#instance_dns_manager = nova.network.noop_dns_driver.NoopDNSDriver
+
+# DEPRECATED:
+# If specified, Nova checks if the availability_zone of every instance
+# matches
+# what the database says the availability_zone should be for the
+# specified
+# dns_domain.
+#
+# Related options:
+#
+# * use_neutron: this option only works with nova-network.
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#instance_dns_domain =
+
+# DEPRECATED:
+# Assign IPv6 and IPv4 addresses when creating instances.
+#
+# Related options:
+#
+# * use_neutron: this only works with nova-network.
+# (boolean value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#use_ipv6 = false
+
+# DEPRECATED:
+# Abstracts out IPv6 address generation to pluggable backends.
+#
+# nova-network can be put into dual-stack mode, so that it uses
+# both IPv4 and IPv6 addresses. In dual-stack mode, by default,
+# instances
+# acquire IPv6 global unicast addresses with the help of stateless
+# address
+# auto-configuration mechanism.
+#
+# Related options:
+#
+# * use_neutron: this option only works with nova-network.
+# * use_ipv6: this option only works if ipv6 is enabled for nova-
+# network.
+# (string value)
+# Possible values:
+# rfc2462 - <No description provided>
+# account_identifier - <No description provided>
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#ipv6_backend = rfc2462
+
+# DEPRECATED:
+# This option is used to enable or disable quota checking for tenant
+# networks.
+#
+# Related options:
+#
+# * quota_networks
+# (boolean value)
+# This option is deprecated for removal since 14.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# CRUD operations on tenant networks are only available when using
+# nova-network
+# and nova-network is itself deprecated.
+#enable_network_quota = false
+
+# DEPRECATED:
+# This option controls the number of private networks that can be
+# created per
+# project (or per tenant).
+#
+# Related options:
+#
+# * enable_network_quota
+# (integer value)
+# Minimum value: 0
+# This option is deprecated for removal since 14.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# CRUD operations on tenant networks are only available when using
+# nova-network
+# and nova-network is itself deprecated.
+#quota_networks = 3
+
+#
+# Filename that will be used for storing websocket frames received
+# and sent by a proxy service (like VNC, spice, serial) running on
+# this host.
+# If this is not set, no recording will be done.
+# (string value)
+#record = <None>
+
+# Run as a background process. (boolean value)
+#daemon = false
+
+# Disallow non-encrypted connections. (boolean value)
+#ssl_only = false
+
+# Set to True if source host is addressed with IPv6. (boolean value)
+#source_is_ipv6 = false
+
+# Path to SSL certificate file. (string value)
+#cert = self.pem
+
+# SSL key file (if separate from cert). (string value)
+#key = <None>
+
+#
+# Path to directory with content which will be served by a web server.
+# (string value)
+#web = /usr/share/spice-html5
+
+#
+# The directory where the Nova python modules are installed.
+#
+# This directory is used to store template files for networking and
+# remote
+# console access. It is also the default path for other config options
+# which
+# need to persist Nova internal data. It is very unlikely that you
+# need to
+# change this option from its default value.
+#
+# Possible values:
+#
+# * The full path to a directory.
+#
+# Related options:
+#
+# * ``state_path``
+# (string value)
+#pybasedir = /usr/lib/python2.7/dist-packages
+
+#
+# The directory where the Nova binaries are installed.
+#
+# This option is only relevant if the networking capabilities from
+# Nova are
+# used (see services below). Nova's networking capabilities are
+# targeted to
+# be fully replaced by Neutron in the future. It is very unlikely that
+# you need
+# to change this option from its default value.
+#
+# Possible values:
+#
+# * The full path to a directory.
+# (string value)
+#bindir = /usr/local/bin
+
+#
+# The top-level directory for maintaining Nova's state.
+#
+# This directory is used to store Nova's internal state. It is used by
+# a
+# variety of other config options which derive from this. In some
+# scenarios
+# (for example migrations) it makes sense to use a storage location
+# which is
+# shared between multiple compute hosts (for example via NFS). Unless
+# the
+# option ``instances_path`` gets overwritten, this directory can grow
+# very
+# large.
+#
+# Possible values:
+#
+# * The full path to a directory. Defaults to value provided in
+# ``pybasedir``.
+# (string value)
+state_path = /var/lib/nova
+
+#
+# Number of seconds indicating how frequently the state of services on
+# a
+# given hypervisor is reported. Nova needs to know this to determine
+# the
+# overall health of the deployment.
+#
+# Related Options:
+#
+# * service_down_time
+# report_interval should be less than service_down_time. If
+# service_down_time
+# is less than report_interval, services will routinely be
+# considered down,
+# because they report in too rarely.
+# (integer value)
+#report_interval = 10
+report_interval = {{ controller.get('report_interval', '60') }}
+
+#
+# Maximum time in seconds since last check-in for up service
+#
+# Each compute node periodically updates its database status based on
+# the specified report interval. If the compute node hasn't updated
+# its status for more than service_down_time, then the compute node
+# is considered down.
+#
+# Related Options:
+#
+# * report_interval (service_down_time should not be less than
+# report_interval)
+# (integer value)
+service_down_time = {{ controller.service_down_time|default('180') }}
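+# With the fallback values used above (report_interval 60,
+# service_down_time 180), a service can miss up to three consecutive
+# reports before being considered down, which satisfies the
+# requirement that report_interval stay below service_down_time.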
+
+#
+# Enable periodic tasks.
+#
+# If set to true, this option allows services to periodically run
+# tasks
+# on the manager.
+#
+# In case of running multiple schedulers or conductors you may want to
+# run
+# periodic tasks on only one host - in this case disable this option
+# for all
+# hosts but one.
+# (boolean value)
+#periodic_enable = true
+
+#
+# Number of seconds to randomly delay when starting the periodic task
+# scheduler to reduce stampeding.
+#
+# When compute workers are restarted in unison across a cluster,
+# they all end up running the periodic tasks at the same time
+# causing problems for the external services. To mitigate this
+# behavior, periodic_fuzzy_delay option allows you to introduce a
+# random initial delay when starting the periodic task scheduler.
+#
+# Possible Values:
+#
+# * Any positive integer (in seconds)
+# * 0 : disable the random delay
+# (integer value)
+# Minimum value: 0
+#periodic_fuzzy_delay = 60
+
+# List of APIs to be enabled by default. (list value)
+enabled_apis = osapi_compute,metadata
+
+#
+# List of APIs with enabled SSL.
+#
+# Nova provides SSL support for the API servers. enabled_ssl_apis
+# option
+# allows configuring the SSL support.
+# (list value)
+#enabled_ssl_apis =
+
+#
+# IP address on which the OpenStack API will listen.
+#
+# The OpenStack API service listens on this IP address for incoming
+# requests.
+# (string value)
+#osapi_compute_listen = 0.0.0.0
+
+#
+# Port on which the OpenStack API will listen.
+#
+# The OpenStack API service listens on this port number for incoming
+# requests.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#osapi_compute_listen_port = 8774
+
+#
+# Number of workers for OpenStack API service. The default will be the
+# number
+# of CPUs available.
+#
+# OpenStack API services can be configured to run as multi-process
+# (workers).
+# This overcomes the problem of reduction in throughput when API
+# request
+# concurrency increases. OpenStack API service will run in the
+# specified
+# number of processes.
+#
+# Possible Values:
+#
+# * Any positive integer
+# * None (default value)
+# (integer value)
+# Minimum value: 1
+#osapi_compute_workers = <None>
+
+#
+# IP address on which the metadata API will listen.
+#
+# The metadata API service listens on this IP address for incoming
+# requests.
+# (string value)
+#metadata_listen = 0.0.0.0
+metadata_listen={{ controller.get('metadata', {}).get('bind', {}).get('address', controller.bind.private_address) }}
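+# The line above chains Jinja .get() lookups, so the metadata bind
+# address falls back to controller.bind.private_address when no
+# metadata-specific address is set; for example, an illustrative
+# pillar entry of nova:controller:metadata:bind:address: 10.0.0.11
+# would render as metadata_listen=10.0.0.11.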
+
+#
+# Port on which the metadata API will listen.
+#
+# The metadata API service listens on this port number for incoming
+# requests.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#metadata_listen_port = 8775
+{%- if controller.get('metadata', {}).get('bind', {}).port is defined %}
+metadata_listen_port={{ controller.metadata.bind.port }}
+{%- else %}
+#metadata_listen_port=8775
+{%- endif %}
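+# Unlike the address above, the port uses an explicit if/else so that
+# an unset pillar key keeps the default visible as a comment; for
+# example, an illustrative pillar entry of
+# nova:controller:metadata:bind:port: 18775 would render as
+# metadata_listen_port=18775.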
+
+#
+# Number of workers for metadata service. If not specified, the
+# number of available CPUs will be used.
+#
+# The metadata service can be configured to run as multi-process
+# (workers).
+# This overcomes the problem of reduction in throughput when API
+# request
+# concurrency increases. The metadata service will run in the
+# specified
+# number of processes.
+#
+# Possible Values:
+#
+# * Any positive integer
+# * None (default value)
+# (integer value)
+# Minimum value: 1
+#metadata_workers = <None>
+metadata_workers = {{ controller.workers }}
+
+# Full class name for the Manager for network (string value)
+# Possible values:
+# nova.network.manager.FlatManager - <No description provided>
+# nova.network.manager.FlatDHCPManager - <No description provided>
+# nova.network.manager.VlanManager - <No description provided>
+#network_manager = nova.network.manager.VlanManager
+
+#
+# This option specifies the driver to be used for the servicegroup
+# service.
+#
+# ServiceGroup API in nova enables checking status of a compute node.
+# When a
+# compute worker running the nova-compute daemon starts, it calls the
+# join API
+# to join the compute group. Services like nova scheduler can query
+# the
+# ServiceGroup API to check if a node is alive. Internally, the
+# ServiceGroup
+# client driver automatically updates the compute worker status. There
+# are
+# multiple backend implementations for this service: Database
+# ServiceGroup driver
+# and Memcache ServiceGroup driver.
+#
+# Possible Values:
+#
+# * db : Database ServiceGroup driver
+# * mc : Memcache ServiceGroup driver
+#
+# Related Options:
+#
+# * service_down_time (maximum time since last check-in for up
+# service)
+# (string value)
+# Possible values:
+# db - <No description provided>
+# mc - <No description provided>
+#servicegroup_driver = db
+
+#
+# From oslo.service.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should we run
+# them here? (boolean value)
+#run_external_periodic_tasks = true
+
+#
+# From oslo.service.service
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>, and
+# <start>:<end>, where 0 results in listening on a random tcp port
+# number; <port> results in listening on the specified port number
+# (and not enabling backdoor if that port is in use); and
+# <start>:<end> results in listening on the smallest unused port
+# number within the specified range of port numbers. The chosen port
+# is displayed in the service's log file. (string value)
+#backdoor_port = <None>
+
+# Enable eventlet backdoor, using the provided path as a unix socket
+# that can receive connections. This option is mutually exclusive with
+# 'backdoor_port' in that only one should be provided. If both are
+# provided then the existence of this option overrides the usage of
+# that option. (string value)
+#backdoor_socket = <None>
+
+# Enables or disables logging values of all registered options when
+# starting a service (at DEBUG level). (boolean value)
+#log_options = true
+
+# Specify a timeout after which a gracefully shutdown server will
+# exit. Zero value means endless wait. (integer value)
+#graceful_shutdown_timeout = 60
+
+{%- if controller.logging is defined %}
+{%- set _data = controller.logging %}
+{%- include "oslo_templates/files/queens/oslo/_log.conf" %}
+{%- endif %}
+
+{%- set _data = controller.message_queue %}
+{%- include "oslo_templates/files/queens/oslo/messaging/_default.conf" %}
+
+[api]
+#
+# Options under this group are used to define Nova API.
+
+#
+# From nova.conf
+#
+
+#
+# This determines the strategy to use for authentication: keystone or
+# noauth2.
+# 'noauth2' is designed for testing only, as it does no actual
+# credential
+# checking. 'noauth2' provides administrative credentials only if
+# 'admin' is
+# specified as the username.
+# (string value)
+# Possible values:
+# keystone - <No description provided>
+# noauth2 - <No description provided>
+auth_strategy = keystone
+
+#
+# When True, the 'X-Forwarded-For' header is treated as the canonical
+# remote
+# address. When False (the default), the 'remote_address' header is
+# used.
+#
+# You should only enable this if you have an HTML sanitizing proxy.
+# (boolean value)
+#use_forwarded_for = false
+
+#
+# When gathering the existing metadata for a config drive, the
+# EC2-style metadata is returned for all versions that don't appear in
+# this option. As of the Liberty release, the available versions are:
+#
+# * 1.0
+# * 2007-01-19
+# * 2007-03-01
+# * 2007-08-29
+# * 2007-10-10
+# * 2007-12-15
+# * 2008-02-01
+# * 2008-09-01
+# * 2009-04-04
+#
+# The option is in the format of a single string, with each version
+# separated by a space.
+#
+# Possible values:
+#
+# * Any string that represents zero or more versions, separated by
+#   spaces.
+# (string value)
+#config_drive_skip_versions = 1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01
+
+#
+# A list of vendordata providers.
+#
+# vendordata providers are how deployers can provide metadata via
+# configdrive and metadata that is specific to their deployment. There
+# are currently two supported providers: StaticJSON and DynamicJSON.
+#
+# StaticJSON reads a JSON file configured by the flag
+# vendordata_jsonfile_path and places the JSON from that file into
+# vendor_data.json and vendor_data2.json.
+#
+# DynamicJSON is configured via the vendordata_dynamic_targets flag,
+# which is documented separately. For each of the endpoints specified
+# in that flag, a section is added to the vendor_data2.json.
+#
+# For more information on the requirements for implementing a
+# vendordata dynamic endpoint, please see the vendordata.rst file in
+# the nova developer reference.
+#
+# Possible values:
+#
+# * A list of vendordata providers, with StaticJSON and DynamicJSON
+#   being current options.
+#
+# Related options:
+#
+# * vendordata_dynamic_targets
+# * vendordata_dynamic_ssl_certfile
+# * vendordata_dynamic_connect_timeout
+# * vendordata_dynamic_read_timeout
+# * vendordata_dynamic_failure_fatal
+# (list value)
+#vendordata_providers = StaticJSON
+
+#
+# A list of targets for the dynamic vendordata provider. These targets
+# are of the form <name>@<url>.
+#
+# The dynamic vendordata provider collects metadata by contacting
+# external REST services and querying them for information about the
+# instance. This behaviour is documented in the vendordata.rst file in
+# the nova developer reference.
+# (list value)
+#vendordata_dynamic_targets =
+
+#
+# Path to an optional certificate file or CA bundle to verify dynamic
+# vendordata REST services' SSL certificates against.
+#
+# Possible values:
+#
+# * An empty string, or a path to a valid certificate file
+#
+# Related options:
+#
+# * vendordata_providers
+# * vendordata_dynamic_targets
+# * vendordata_dynamic_connect_timeout
+# * vendordata_dynamic_read_timeout
+# * vendordata_dynamic_failure_fatal
+# (string value)
+#vendordata_dynamic_ssl_certfile =
+
+#
+# Maximum wait time for an external REST service to connect.
+#
+# Possible values:
+#
+# * Any integer with a value greater than three (the TCP packet
+#   retransmission timeout). Note that instance start may be blocked
+#   during this wait time, so this value should be kept small.
+#
+# Related options:
+#
+# * vendordata_providers
+# * vendordata_dynamic_targets
+# * vendordata_dynamic_ssl_certfile
+# * vendordata_dynamic_read_timeout
+# * vendordata_dynamic_failure_fatal
+# (integer value)
+# Minimum value: 3
+#vendordata_dynamic_connect_timeout = 5
+
+#
+# Maximum wait time for an external REST service to return data once
+# connected.
+#
+# Possible values:
+#
+# * Any integer. Note that instance start is blocked during this wait
+#   time, so this value should be kept small.
+#
+# Related options:
+#
+# * vendordata_providers
+# * vendordata_dynamic_targets
+# * vendordata_dynamic_ssl_certfile
+# * vendordata_dynamic_connect_timeout
+# * vendordata_dynamic_failure_fatal
+# (integer value)
+# Minimum value: 0
+#vendordata_dynamic_read_timeout = 5
+
+#
+# Should failures to fetch dynamic vendordata be fatal to instance
+# boot?
+#
+# Related options:
+#
+# * vendordata_providers
+# * vendordata_dynamic_targets
+# * vendordata_dynamic_ssl_certfile
+# * vendordata_dynamic_connect_timeout
+# * vendordata_dynamic_read_timeout
+# (boolean value)
+#vendordata_dynamic_failure_fatal = false
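+#
+# A hedged, illustrative sketch of a DynamicJSON setup combining the
+# options above (the endpoint name and URL are assumptions, not
+# defaults):
+#
+# vendordata_providers = StaticJSON,DynamicJSON
+# vendordata_dynamic_targets = example@http://vendordata.example.com/v1/
+# vendordata_dynamic_connect_timeout = 5
+# vendordata_dynamic_read_timeout = 5
+# vendordata_dynamic_failure_fatal = false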
+
+#
+# This option is the time (in seconds) to cache metadata. When set to
+# 0, metadata caching is disabled entirely; this is generally not
+# recommended for performance reasons. Increasing this setting should
+# improve response times of the metadata API when under heavy load.
+# Higher values may increase memory usage, and result in longer times
+# for host metadata changes to take effect.
+# (integer value)
+# Minimum value: 0
+#metadata_cache_expiration = 15
+
+#
+# Cloud providers may store custom data in a vendor data file that
+# will then be available to the instances via the metadata service,
+# and to the rendering of config-drive. The default class for this,
+# JsonFileVendorData, loads this information from a JSON file, whose
+# path is configured by this option. If there is no path set by this
+# option, the class returns an empty dictionary.
+#
+# Possible values:
+#
+# * Any string representing the path to the data file, or an empty
+#   string (default).
+# (string value)
+#vendordata_jsonfile_path = <None>
+
+#
+# As a query can potentially return many thousands of items, you can
+# limit the maximum number of items in a single response by setting
+# this option.
+# (integer value)
+# Minimum value: 0
+# Deprecated group/name - [DEFAULT]/osapi_max_limit
+#max_limit = 1000
+max_limit={{ controller.osapi_max_limit|default('1000') }}
+
+#
+# This string is prepended to the normal URL that is returned in links
+# to the OpenStack Compute API. If it is empty (the default), the URLs
+# are returned unchanged.
+#
+# Possible values:
+#
+# * Any string, including an empty string (the default).
+# (string value)
+# Deprecated group/name - [DEFAULT]/osapi_compute_link_prefix
+#compute_link_prefix = <None>
+
+#
+# This string is prepended to the normal URL that is returned in links
+# to Glance resources. If it is empty (the default), the URLs are
+# returned unchanged.
+#
+# Possible values:
+#
+# * Any string, including an empty string (the default).
+# (string value)
+# Deprecated group/name - [DEFAULT]/osapi_glance_link_prefix
+#glance_link_prefix = <None>
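+#
+# For illustration only (the URLs are assumptions), a deployment that
+# fronts the APIs with a proxy might set:
+#
+# compute_link_prefix = https://compute.example.com
+# glance_link_prefix = https://image.example.com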
+
+# DEPRECATED:
+# Operators can turn off the ability for a user to take snapshots of
+# their instances by setting this option to False. When disabled, any
+# attempt to take a snapshot will result in an HTTP 400 response ("Bad
+# Request").
+# (boolean value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: This option disables the createImage server action API in a
+# non-discoverable way and is thus a barrier to interoperability.
+# Also, it is not used for other APIs that create snapshots like
+# shelve or createBackup. Disabling snapshots should be done via
+# policy if so desired.
+#allow_instance_snapshots = true
+
+# DEPRECATED:
+# This option is a list of all instance states for which network
+# address information should not be returned from the API.
+#
+# Possible values:
+#
+# A list of strings, where each string is a valid VM state, as defined
+# in nova/compute/vm_states.py. As of the Newton release, they are:
+#
+# * "active"
+# * "building"
+# * "paused"
+# * "suspended"
+# * "stopped"
+# * "rescued"
+# * "resized"
+# * "soft-delete"
+# * "deleted"
+# * "error"
+# * "shelved"
+# * "shelved_offloaded"
+# (list value)
+# Deprecated group/name - [DEFAULT]/osapi_hide_server_address_states
+# This option is deprecated for removal since 17.0.0.
+# Its value may be silently ignored in the future.
+# Reason: This option hides the server address in the server
+# representation for the configured server states, which makes the GET
+# server API behavior controlled by this config option. Because of
+# this, users cannot discover the API behavior on different clouds,
+# which leads to interoperability issues.
+#hide_server_address_states = building
+
+# The full path to the fping binary. (string value)
+fping_path = /usr/sbin/fping
+
+#
+# When True, the TenantNetworkController will query the Neutron API to
+# get the default networks to use.
+#
+# Related options:
+#
+# * neutron_default_tenant_id
+# (boolean value)
+#use_neutron_default_nets = false
+
+#
+# Tenant ID (also referred to in some places as the 'project ID') used
+# for getting the default network from the Neutron API.
+#
+# Related options:
+#
+# * use_neutron_default_nets
+# (string value)
+#neutron_default_tenant_id = default
+
+#
+# Enables returning of the instance password by the relevant server
+# API calls such as create, rebuild, evacuate, or rescue. If the
+# hypervisor does not support password injection, then the password
+# returned will not be correct, so if your hypervisor does not support
+# password injection, set this to False.
+# (boolean value)
+#enable_instance_password = true
+
+
+[api_database]
+#
+# The *Nova API Database* is a separate database which is used for
+# information shared across *cells*. This database is mandatory since
+# the Mitaka release (13.0.0).
+
+#
+# From nova.conf
+#
+
+# The SQLAlchemy connection string to use to connect to the database.
+# (string value)
+#connection = <None>
+connection=sqlite:////var/lib/nova/nova.sqlite
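+#
+# For illustration only (host and credentials are assumptions), a
+# production deployment would typically point this at MySQL rather
+# than SQLite, e.g.:
+#
+# connection = mysql+pymysql://nova:secret@db.example.com/nova_api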
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous = true
+
+# The SQLAlchemy connection string to use to connect to the slave
+# database. (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including
+# the default, overrides any server-set SQL mode. To use whatever SQL
+# mode is set by the server configuration, set this to no value.
+# Example: mysql_sql_mode= (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# Connections which have been present in the connection pool longer
+# than this number of seconds will be replaced with a new one the next
+# time they are checked out from the pool. (integer value)
+# Deprecated group/name - [api_database]/idle_timeout
+#connection_recycle_time = 3600
+
+# Maximum number of SQL connections to keep open in a pool. Setting a
+# value of 0 indicates no limit. (integer value)
+#max_pool_size = <None>
+
+# Maximum number of database connection retries during startup. Set to
+# -1 to specify an infinite retry count. (integer value)
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection. (integer
+# value)
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer
+# value)
+#max_overflow = <None>
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything.
+# (integer value)
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer
+# value)
+#pool_timeout = <None>
+
+{%- if controller.get('barbican', {}).get('enabled', False) %}
+{%- set _data = controller.identity %}
+[barbican]
+{%- include "oslo_templates/files/queens/castellan/_barbican.conf" %}
+{%- endif %}
+
+[cache]
+
+#
+# From nova.conf
+#
+{%- if controller.cache is defined %}
+backend = oslo_cache.memcache_pool
+enabled = true
+memcache_servers={%- for member in controller.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
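+#
+# For illustration (the member addresses are assumptions), with two
+# cache members defined in the pillar the block above renders roughly
+# as:
+#
+# backend = oslo_cache.memcache_pool
+# enabled = true
+# memcache_servers=10.0.0.1:11211,10.0.0.2:11211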
+
+# Prefix for building the configuration dictionary for the cache
+# region. This should not need to be changed unless there is another
+# dogpile.cache region with the same configuration name. (string
+# value)
+#config_prefix = cache.oslo
+
+# Default TTL, in seconds, for any cached item in the dogpile.cache
+# region. This applies to any cached method that doesn't have an
+# explicit cache expiration time defined for it. (integer value)
+#expiration_time = 600
+
+# Cache backend module. For eventlet-based or environments with
+# hundreds of threaded servers, Memcache with pooling
+# (oslo_cache.memcache_pool) is recommended. For environments with
+# fewer than 100 threaded servers, Memcached (dogpile.cache.memcached)
+# or Redis (dogpile.cache.redis) is recommended. Test environments
+# with a single instance of the server can use the
+# dogpile.cache.memory backend. (string value)
+# Possible values:
+# oslo_cache.memcache_pool - <No description provided>
+# oslo_cache.dict - <No description provided>
+# oslo_cache.mongo - <No description provided>
+# oslo_cache.etcd3gw - <No description provided>
+# dogpile.cache.memcached - <No description provided>
+# dogpile.cache.pylibmc - <No description provided>
+# dogpile.cache.bmemcached - <No description provided>
+# dogpile.cache.dbm - <No description provided>
+# dogpile.cache.redis - <No description provided>
+# dogpile.cache.memory - <No description provided>
+# dogpile.cache.memory_pickle - <No description provided>
+# dogpile.cache.null - <No description provided>
+#backend = dogpile.cache.null
+
+# Arguments supplied to the backend module. Specify this option once
+# per argument to be passed to the dogpile.cache backend. Example
+# format: "<argname>:<value>". (multi valued)
+#backend_argument =
+
+# Proxy classes to import that will affect the way the dogpile.cache
+# backend functions. See the dogpile.cache documentation on changing-
+# backend-behavior. (list value)
+#proxies =
+
+# Global toggle for caching. (boolean value)
+#enabled = false
+
+# Extra debugging from the cache backend (cache keys,
+# get/set/delete/etc calls). This is only really useful if you need to
+# see the specific cache-backend get/set/delete calls with the
+# keys/values. Typically this should be left set to false. (boolean
+# value)
+#debug_cache_backend = false
+
+# Memcache servers in the format of "host:port".
+# (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
+# (list value)
+#memcache_servers = localhost:11211
+
+# Number of seconds memcached server is considered dead before it is
+# tried again. (dogpile.cache.memcache and oslo_cache.memcache_pool
+# backends only). (integer value)
+#memcache_dead_retry = 300
+
+# Timeout in seconds for every call to a server.
+# (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
+# (integer value)
+#memcache_socket_timeout = 3
+
+# Max total number of open connections to every memcached server.
+# (oslo_cache.memcache_pool backend only). (integer value)
+#memcache_pool_maxsize = 10
+
+# Number of seconds a connection to memcached is held unused in the
+# pool before it is closed. (oslo_cache.memcache_pool backend only).
+# (integer value)
+#memcache_pool_unused_timeout = 60
+
+# Number of seconds that an operation will wait to get a memcache
+# client connection. (integer value)
+#memcache_pool_connection_get_timeout = 10
+
+
+[cells]
+#
+# DEPRECATED: Cells options allow you to use cells v1 functionality in
+# an OpenStack deployment.
+#
+# Note that the options in this group are only for cells v1
+# functionality, which is considered experimental and not recommended
+# for new deployments. Cells v1 is being replaced with cells v2,
+# which, starting in the 15.0.0 Ocata release, is required; all Nova
+# deployments will be at least a cells v2 cell of one.
+#
+
+#
+# From nova.conf
+#
+
+# DEPRECATED:
+# Enable cell v1 functionality.
+#
+# Note that cells v1 is considered experimental and not recommended
+# for new Nova deployments. Cells v1 is being replaced by cells v2,
+# and starting in the 15.0.0 Ocata release all Nova deployments are at
+# least a cells v2 cell of one. Setting this option, or any other
+# options in the [cells] group, is not required for cells v2.
+#
+# When this functionality is enabled, it lets you scale an OpenStack
+# Compute cloud in a more distributed fashion without having to use
+# complicated technologies like database and message queue clustering.
+# Cells are configured as a tree. The top-level cell should have a
+# host that runs a nova-api service, but no nova-compute services.
+# Each child cell should run all of the typical nova-* services in a
+# regular Compute cloud except for nova-api. You can think of cells as
+# a normal Compute deployment in that each cell has its own database
+# server and message queue broker.
+#
+# Related options:
+#
+# * name: A unique cell name must be given when this functionality
+# is enabled.
+# * cell_type: Cell type should be defined for all cells.
+# (boolean value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+enable = false
+
+# DEPRECATED:
+# Name of the current cell.
+#
+# This value must be unique for each cell. Name of a cell is used as
+# its id, leaving this option unset or setting the same name for
+# two or more cells may cause unexpected behaviour.
+#
+# Related options:
+#
+# * enabled: This option is meaningful only when cells service
+# is enabled
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#name = nova
+
+# DEPRECATED:
+# Cell capabilities.
+#
+# List of arbitrary key=value pairs defining capabilities of the
+# current cell to be sent to the parent cells. These capabilities
+# are intended to be used in cells scheduler filters/weighers.
+#
+# Possible values:
+#
+# * key=value pairs list for example;
+# ``hypervisor=xenserver;kvm,os=linux;windows``
+# (list value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#capabilities = hypervisor=xenserver;kvm,os=linux;windows
+
+# DEPRECATED:
+# Call timeout.
+#
+# The cell messaging module waits for response(s) to be put into the
+# eventlet queue. This option defines the number of seconds waited for
+# a response from a call to a cell.
+#
+# Possible values:
+#
+# * An integer, corresponding to the interval time in seconds.
+# (integer value)
+# Minimum value: 0
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#call_timeout = 60
+
+# DEPRECATED:
+# Reserve percentage
+#
+# Percentage of cell capacity to hold in reserve, so that the minimum
+# amount of free resource is considered to be:
+#
+# min_free = total * (reserve_percent / 100.0)
+#
+# This option affects both memory and disk utilization.
+#
+# The primary purpose of this reserve is to ensure some space is
+# available for users who want to resize their instance to be larger.
+# Note that currently once the capacity expands into this reserve
+# space this option is ignored.
+#
+# Possible values:
+#
+# * An integer or float, corresponding to the percentage of cell
+# capacity to
+# be held in reserve.
+# (floating point value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#reserve_percent = 10.0
+
+# DEPRECATED:
+# Type of cell.
+#
+# When cells feature is enabled the hosts in the OpenStack Compute
+# cloud are partitioned into groups. Cells are configured as a tree.
+# The top-level cell's cell_type must be set to ``api``. All other
+# cells are defined as a ``compute cell`` by default.
+#
+# Related option:
+#
+# * quota_driver: Disable quota checking for the child cells.
+# (nova.quota.NoopQuotaDriver)
+# (string value)
+# Possible values:
+# api - <No description provided>
+# compute - <No description provided>
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#cell_type = compute
+
+# DEPRECATED:
+# Mute child interval.
+#
+# Number of seconds after which a child cell that has not sent
+# capability and capacity updates is to be treated as a mute cell. The
+# child cell will then be weighted in a way that strongly recommends
+# it be skipped.
+#
+# Possible values:
+#
+# * An integer, corresponding to the interval time in seconds.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#mute_child_interval = 300
+
+# DEPRECATED:
+# Bandwidth update interval.
+#
+# Seconds between bandwidth usage cache updates for cells.
+#
+# Possible values:
+#
+# * An integer, corresponding to the interval time in seconds.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#bandwidth_update_interval = 600
+
+# DEPRECATED:
+# Instance update sync database limit.
+#
+# Number of instances to pull from the database at one time for
+# a sync. If there are more instances to update the results will
+# be paged through.
+#
+# Possible values:
+#
+# * An integer, corresponding to a number of instances.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#instance_update_sync_database_limit = 100
+
+# DEPRECATED:
+# Mute weight multiplier.
+#
+# Multiplier used to weigh mute children. Mute children cells are
+# recommended to be skipped so their weight is multiplied by this
+# negative value.
+#
+# Possible values:
+#
+# * A negative number
+# (floating point value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#mute_weight_multiplier = -10000.0
+
+# DEPRECATED:
+# Ram weight multiplier.
+#
+# Multiplier used for weighing ram. Negative numbers indicate that
+# Compute should stack VMs on one host instead of spreading out new
+# VMs to more hosts in the cell.
+#
+# Possible values:
+#
+# * Numeric multiplier
+# (floating point value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#ram_weight_multiplier = 10.0
+
+# DEPRECATED:
+# Offset weight multiplier
+#
+# Multiplier used by the offset weigher. Cells with higher
+# weight_offsets in the DB will be preferred. The weight_offset
+# is a property of a cell stored in the database. It can be used
+# by a deployer to have scheduling decisions favor or disfavor
+# cells based on the setting.
+#
+# Possible values:
+#
+# * Numeric multiplier
+# (floating point value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#offset_weight_multiplier = 1.0
+
+# DEPRECATED:
+# Instance updated at threshold
+#
+# Number of seconds after an instance was updated or deleted to
+# continue to update cells. This option lets the cells manager only
+# attempt to sync instances that have been updated recently, i.e., a
+# threshold of 3600 means to only update instances that have been
+# modified in the last hour.
+#
+# Possible values:
+#
+# * Threshold in seconds
+#
+# Related options:
+#
+# * This value is used with the ``instance_update_num_instances``
+# value in a periodic task run.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#instance_updated_at_threshold = 3600
+
+# DEPRECATED:
+# Instance update num instances
+#
+# On every run of the periodic task, the nova cells manager will
+# attempt to sync instance_update_num_instances instances. When the
+# manager gets the list of instances, it shuffles them so that
+# multiple nova-cells services do not attempt to sync the same
+# instances in lockstep.
+#
+# Possible values:
+#
+# * Positive integer number
+#
+# Related options:
+#
+# * This value is used with the ``instance_updated_at_threshold``
+# value in a periodic task run.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#instance_update_num_instances = 1
+
+# DEPRECATED:
+# Maximum hop count
+#
+# When processing a targeted message, if the local cell is not the
+# target, a route is defined between neighbouring cells, and the
+# message is processed across the whole routing path. This option
+# defines the maximum hop count until the target is reached.
+#
+# Possible values:
+#
+# * Positive integer value
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#max_hop_count = 10
+
+# DEPRECATED:
+# Cells scheduler.
+#
+# The class of the driver used by the cells scheduler. This should be
+# the full Python path to the class to be used. If nothing is
+# specified in this option, the CellsScheduler is used.
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#scheduler = nova.cells.scheduler.CellsScheduler
+
+# DEPRECATED:
+# RPC driver queue base.
+#
+# When sending a message to another cell by JSON-ifying the message
+# and making an RPC cast to 'process_message', a base queue is used.
+# This option defines the base queue name to be used when
+# communicating between cells. Various topics by message type will be
+# appended to this.
+#
+# Possible values:
+#
+# * The base queue name to be used when communicating between cells.
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#rpc_driver_queue_base = cells.intercell
+
+# DEPRECATED:
+# Scheduler filter classes.
+#
+# Filter classes the cells scheduler should use. An entry of
+# "nova.cells.filters.all_filters" maps to all cells filters
+# included with nova. As of the Mitaka release the following
+# filter classes are available:
+#
+# Different cell filter: A scheduler hint of 'different_cell'
+# with a value of a full cell name may be specified to route
+# a build away from a particular cell.
+#
+# Image properties filter: Image metadata named
+# 'hypervisor_version_requires' with a version specification
+# may be specified to ensure the build goes to a cell which
+# has hypervisors of the required version. If either the version
+# requirement on the image or the hypervisor capability of the
+# cell is not present, this filter returns without filtering out
+# the cells.
+#
+# Target cell filter: A scheduler hint of 'target_cell' with a
+# value of a full cell name may be specified to route a build to
+# a particular cell. No error handling is done, as there is no way
+# to know whether the full path is valid.
+#
+# As an admin user, you can also add a filter that directs builds
+# to a particular cell.
+#
+# (list value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#scheduler_filter_classes = nova.cells.filters.all_filters
+
+# DEPRECATED:
+# Scheduler weight classes.
+#
+# Weigher classes the cells scheduler should use. An entry of
+# "nova.cells.weights.all_weighers" maps to all cell weighers
+# included with nova. As of the Mitaka release the following
+# weight classes are available:
+#
+# mute_child: Downgrades the likelihood of child cells that haven't
+# sent capacity or capability updates in a while being chosen for
+# scheduling requests. Options include mute_weight_multiplier
+# (multiplier for mute children; value should be negative).
+#
+# ram_by_instance_type: Select cells with the most RAM capacity
+# for the instance type being requested. Because higher weights
+# win, Compute returns the number of available units for the
+# instance type requested. The ram_weight_multiplier option defaults
+# to 10.0, which adds to the weight by a factor of 10. Use a negative
+# number to stack VMs on one host instead of spreading out new VMs
+# to more hosts in the cell.
+#
+# weight_offset: Allows modifying the database to weight a particular
+# cell. The highest weight will be the first cell to be scheduled for
+# launching an instance. When the weight_offset of a cell is set to 0,
+# it is unlikely to be picked, but it could be picked if other cells
+# have a lower weight (for example, if they are full). When the
+# weight_offset is set to a very high value (for example,
+# '999999999999999'), it is likely to be picked if no other cell has a
+# higher weight.
+# (list value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#scheduler_weight_classes = nova.cells.weights.all_weighers
+
+# DEPRECATED:
+# Scheduler retries.
+#
+# How many retries when no cells are available. Specifies how many
+# times the scheduler tries to launch a new instance when no cells
+# are available.
+#
+# Possible values:
+#
+# * Positive integer value
+#
+# Related options:
+#
+# * This value is used with the ``scheduler_retry_delay`` value
+# while retrying to find a suitable cell.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#scheduler_retries = 10
+
+# DEPRECATED:
+# Scheduler retry delay.
+#
+# Specifies the delay (in seconds) between scheduling retries when no
+# cell can be found to place the new instance on. When the instance
+# could not be scheduled to a cell after ``scheduler_retries`` in
+# combination with ``scheduler_retry_delay``, the scheduling
+# of the instance fails.
+#
+# Possible values:
+#
+# * Time in seconds.
+#
+# Related options:
+#
+# * This value is used with the ``scheduler_retries`` value
+# while retrying to find a suitable cell.
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#scheduler_retry_delay = 2
+
+# DEPRECATED:
+# DB check interval.
+#
+# The cell state manager updates cell status for all cells from the DB
+# only after this interval has passed. Otherwise, cached statuses are
+# used. If this value is 0 or negative, all cell statuses are updated
+# from the DB whenever a state is needed.
+#
+# Possible values:
+#
+# * Interval time, in seconds.
+#
+# (integer value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#db_check_interval = 60
+
+# DEPRECATED:
+# Optional cells configuration.
+#
+# Configuration file from which to read cells configuration. If given,
+# overrides reading cells from the database.
+#
+# Cells store all inter-cell communication data, including user names
+# and passwords, in the database. Because the cells data is not
+# updated very frequently, use this option to specify a JSON file to
+# store cells data. With this configuration, the database is no longer
+# consulted when reloading the cells data. The file must have columns
+# present in the Cell model (excluding common database fields and the
+# id column). You must specify the queue connection information
+# through a transport_url field, instead of username, password, and so
+# on.
+#
+# The transport_url has the following form:
+# rabbit://USERNAME:PASSWORD@HOSTNAME:PORT/VIRTUAL_HOST
+#
+# Possible values:
+#
+# The scheme can be either qpid or rabbit; the following sample shows
+# this optional configuration:
+#
+# {
+# "parent": {
+# "name": "parent",
+# "api_url": "http://api.example.com:8774",
+# "transport_url": "rabbit://rabbit.example.com",
+# "weight_offset": 0.0,
+# "weight_scale": 1.0,
+# "is_parent": true
+# },
+# "cell1": {
+# "name": "cell1",
+# "api_url": "http://api.example.com:8774",
+# "transport_url": "rabbit://rabbit1.example.com",
+# "weight_offset": 0.0,
+# "weight_scale": 1.0,
+# "is_parent": false
+# },
+# "cell2": {
+# "name": "cell2",
+# "api_url": "http://api.example.com:8774",
+# "transport_url": "rabbit://rabbit2.example.com",
+# "weight_offset": 0.0,
+# "weight_scale": 1.0,
+# "is_parent": false
+# }
+# }
+#
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Cells v1 is being replaced with Cells v2.
+#cells_config = <None>
+
+
+[cinder]
+
+#
+# From nova.conf
+#
+catalog_info=volumev3:cinderv3:internalURL
+{%- if controller.glance.get('protocol', 'http') == 'https' %}
+cafile={{ controller.identity.get('cacert_file', controller.cacert_file) }}
+{%- endif %}
+
+#
+# Info to match when looking for cinder in the service catalog.
+#
+# Possible values:
+#
+# * Format is separated values of the form:
+#   <service_type>:<service_name>:<endpoint_type>
+#
+# Note: Nova does not support the Cinder v2 API since the Nova 17.0.0
+# Queens release.
+#
+# Related options:
+#
+# * endpoint_template - Setting this option will override catalog_info
+# (string value)
+#catalog_info = volumev3:cinderv3:publicURL
+
+#
+# If this option is set then it will override service catalog lookup
+# with this template for the cinder endpoint.
+#
+# Possible values:
+#
+# * URL for cinder endpoint API
+#   e.g. http://localhost:8776/v3/%(project_id)s
+#
+# Note: Nova does not support the Cinder v2 API since the Nova 17.0.0
+# Queens release.
+#
+# Related options:
+#
+# * catalog_info - If endpoint_template is not set, catalog_info will
+# be used.
+# (string value)
+#endpoint_template = <None>
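+#
+# As an illustrative sketch only (the host is an assumption), pinning
+# the endpoint instead of using the catalog lookup might look like:
+#
+# endpoint_template = http://cinder.example.com:8776/v3/%(project_id)s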
+
+#
+# Region name of this node. This is used when picking the URL in the
+# service catalog.
+#
+# Possible values:
+#
+# * Any string representing region name
+# (string value)
+#os_region_name = <None>
+os_region_name = {{ controller.identity.region }}
+
+#
+# Number of times cinderclient should retry on any failed http call.
+# 0 means the connection is attempted only once. Setting it to any
+# positive integer means that on failure the connection is retried
+# that many times, e.g. setting it to 3 means the total number of
+# attempts to connect will be 4.
+#
+# Possible values:
+#
+# * Any integer value. 0 means connection is attempted only once
+# (integer value)
+# Minimum value: 0
+#http_retries = 3
+
+#
+# Allow attach between instance and volume in different availability
+# zones.
+#
+# If False, volumes attached to an instance must be in the same
+# availability zone in Cinder as the instance availability zone in
+# Nova. This also means care should be taken when booting an instance
+# from a volume where source is not "volume", because Nova will
+# attempt to create a volume using the same availability zone as what
+# is assigned to the instance. If that AZ is not in Cinder (or
+# allow_availability_zone_fallback=False in cinder.conf), the volume
+# create request will fail and the instance will fail the build
+# request. By default there is no availability zone restriction on
+# volume attach.
+# (boolean value)
+#cross_az_attach = true
+{%- if controller.cross_az_attach is defined %}
+cross_az_attach={{ controller.cross_az_attach }}
+{%- endif %}
+
+# PEM encoded Certificate Authority to use when verifying HTTPs
+# connections. (string value)
+#cafile = <None>
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Timeout value for http requests (integer value)
+#timeout = <None>
+
+# Authentication type to load (string value)
+# Deprecated group/name - [cinder]/auth_plugin
+#auth_type = <None>
+
+# Config Section from which to load plugin specific options (string
+# value)
+#auth_section = <None>
+
+# Authentication URL (string value)
+#auth_url = <None>
+
+# Scope for system operations (string value)
+#system_scope = <None>
+
+# Domain ID to scope to (string value)
+#domain_id = <None>
+
+# Domain name to scope to (string value)
+#domain_name = <None>
+
+# Project ID to scope to (string value)
+#project_id = <None>
+
+# Project name to scope to (string value)
+#project_name = <None>
+
+# Domain ID containing project (string value)
+#project_domain_id = <None>
+
+# Domain name containing project (string value)
+#project_domain_name = <None>
+
+# Trust ID (string value)
+#trust_id = <None>
+
+# Optional domain ID to use with v3 and v2 parameters. It will be used
+# for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_id = <None>
+
+# Optional domain name to use with v3 API and v2 parameters. It will
+# be used for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_name = <None>
+
+# User ID (string value)
+#user_id = <None>
+
+# Username (string value)
+# Deprecated group/name - [cinder]/user_name
+#username = <None>
+
+# User's domain id (string value)
+#user_domain_id = <None>
+
+# User's domain name (string value)
+#user_domain_name = <None>
+
+# User's password (string value)
+#password = <None>
+
+# Tenant ID (string value)
+#tenant_id = <None>
+
+# Tenant Name (string value)
+#tenant_name = <None>
+
+[conductor]
+#
+# Options under this group are used to define Conductor's
+# communication, which manager should act as a proxy between computes
+# and the database, and finally, how many worker processes will be
+# used.
+
+#
+# From nova.conf
+#
+
+# DEPRECATED:
+# Topic exchange name on which conductor nodes listen.
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# There is no need to let users choose the RPC topic for all services
+# - there is little gain from this. Furthermore, it makes it really
+# easy to break Nova by using this option.
+
+#
+# Number of workers for the OpenStack Conductor service. The default
+# will be the number of CPUs available.
+# (integer value)
+workers = {{ controller.workers }}
+
+
+[console]
+#
+# Options under this group allow tuning the configuration of the
+# console proxy service.
+#
+# Note: the configuration of every compute node includes a
+# ``console_host`` option, which allows selecting the console proxy
+# service to connect to.
+
+#
+# From nova.conf
+#
+
+#
+# Adds a list of allowed origins to the console websocket proxy, to
+# allow connections from other origin hostnames.
+# The websocket proxy matches the host header with the origin header
+# to prevent cross-site requests. This list specifies which values,
+# other than the host, are allowed in the origin header.
+#
+# Possible values:
+#
+# * A list where each element is an allowed origin hostname, else an
+#   empty list
+# (list value)
+# Deprecated group/name - [DEFAULT]/console_allowed_origins
+#allowed_origins =
+
+
+[consoleauth]
+
+#
+# From nova.conf
+#
+
+#
+# The lifetime of a console auth token (in seconds).
+#
+# A console auth token is used in authorizing console access for a
+# user. Once the auth token's time to live has elapsed, the token is
+# considered expired. Expired tokens are then deleted.
+# (integer value)
+# Minimum value: 0
+# Deprecated group/name - [DEFAULT]/console_token_ttl
+#token_ttl = 600
+
+[cors]
+{%- if controller.cors is defined %}
+{%- set _data = controller.cors %}
+{%- include "oslo_templates/files/queens/oslo/_cors.conf" %}
+{%- endif %}
+
+[crypto]
+
+#
+# From nova.conf
+#
+
+#
+# Filename of the root CA (Certificate Authority). This is a container
+# format and includes root certificates.
+#
+# Possible values:
+#
+# * Any file name containing a root CA; cacert.pem is the default
+#
+# Related options:
+#
+# * ca_path
+# (string value)
+#ca_file = cacert.pem
+
+#
+# Filename of a private key.
+#
+# Related options:
+#
+# * keys_path
+# (string value)
+#key_file = private/cakey.pem
+
+#
+# Filename of the root Certificate Revocation List (CRL). This is a
+# list of certificates that have been revoked, and therefore, entities
+# presenting those (revoked) certificates should no longer be trusted.
+#
+# Related options:
+#
+# * ca_path
+# (string value)
+#crl_file = crl.pem
+
+#
+# Directory path where keys are located.
+#
+# Related options:
+#
+# * key_file
+# (string value)
+#keys_path = $state_path/keys
+
+#
+# Directory path where root CA is located.
+#
+# Related options:
+#
+# * ca_file
+# (string value)
+#ca_path = $state_path/CA
+
+# Option to enable/disable use of CA for each project. (boolean value)
+#use_project_ca = false
+
+#
+# Subject for certificate for users, %s for
+# project, user, timestamp
+# (string value)
+#user_cert_subject = /C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s
+
+#
+# Subject for certificate for projects, %s for
+# project, timestamp
+# (string value)
+#project_cert_subject = /C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s
+
+
+[devices]
+
+#
+# From nova.conf
+#
+
+#
+# A list of the vGPU types enabled in the compute node.
+#
+# Some pGPUs (e.g. NVIDIA GRID K1) support different vGPU types. Users
+# can use this option to specify a list of enabled vGPU types that may
+# be assigned to a guest instance. Note, however, that Nova only
+# supports a single type in the Queens release. If more than one vGPU
+# type is specified (as a comma-separated list), only the first one
+# will be used. An example is as follows:
+#
+# [devices]
+# enabled_vgpu_types = GRID K100,Intel GVT-g,MxGPU.2,nvidia-11
+# (list value)
+#enabled_vgpu_types =
+
+
+[ephemeral_storage_encryption]
+
+#
+# From nova.conf
+#
+
+#
+# Enables/disables LVM ephemeral storage encryption.
+# (boolean value)
+#enabled = false
+
+#
+# Cipher-mode string to be used.
+#
+# The cipher and mode to be used to encrypt ephemeral storage. The set
+# of cipher-mode combinations available depends on kernel support.
+# According to the dm-crypt documentation, the cipher is expected to
+# be in the format: "<cipher>-<chainmode>-<ivmode>".
+#
+# Possible values:
+#
+# * Any crypto option listed in ``/proc/crypto``.
+# (string value)
+#cipher = aes-xts-plain64
+
+#
+# Encryption key length in bits.
+#
+# The bit length of the encryption key to be used to encrypt ephemeral
+# storage. In XTS mode only half of the bits are used for the
+# encryption key.
+# (integer value)
+# Minimum value: 1
+#key_size = 512
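+#
+# A hedged example of enabling LVM ephemeral storage encryption with
+# the defaults shown above (treat it as a sketch, not a
+# recommendation):
+#
+# enabled = true
+# cipher = aes-xts-plain64
+# key_size = 512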
+
+
+[filter_scheduler]
+
+#
+# From nova.conf
+#
+
+#
+# Size of the subset of best hosts selected by the scheduler.
+#
+# New instances will be scheduled on a host chosen randomly from a
+# subset of the N best hosts, where N is the value set by this option.
+#
+# Setting this to a value greater than 1 will reduce the chance that
+# multiple scheduler processes handling similar requests will select
+# the same host, creating a potential race condition. By selecting a
+# host randomly from the N hosts that best fit the request, the chance
+# of a conflict is reduced. However, the higher you set this value,
+# the less optimal the chosen host may be for a given request.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect.
+#
+# Possible values:
+#
+# * An integer, where the integer corresponds to the size of a host
+#   subset. Any integer is valid, although any value less than 1 will
+#   be treated as 1
+# (integer value)
+# Minimum value: 1
+# Deprecated group/name - [DEFAULT]/scheduler_host_subset_size
+host_subset_size = 30
+
+#
+# The number of instances that can be actively performing IO on a
+# host.
+#
+# Instances performing IO include those in the following states:
+# build, resize, snapshot, migrate, rescue, unshelve.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect. Also
+# note that this setting only affects scheduling if the
+# 'io_ops_filter' filter is enabled.
+#
+# Possible values:
+#
+# * An integer, where the integer corresponds to the max number of
+#   instances that can be actively performing IO on any given host.
+# (integer value)
+max_io_ops_per_host = 8
+
+#
+# Maximum number of instances that can be active on a host.
+#
+# If you need to limit the number of instances on any given host, set
+# this option to the maximum number of instances you want to allow.
+# The num_instances_filter will reject any host that has at least as
+# many instances as this option's value.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect. Also
+# note that this setting only affects scheduling if the
+# 'num_instances_filter' filter is enabled.
+#
+# Possible values:
+#
+# * An integer, where the integer corresponds to the max instances
+#   that can be scheduled on a host.
+# (integer value)
+# Minimum value: 1
+max_instances_per_host = 50
+
+#
+# Enable querying of individual hosts for instance information.
+#
+# The scheduler may need information about the instances on a host in
+# order to evaluate its filters and weighers. The most common need for
+# this information is for the (anti-)affinity filters, which need to
+# choose a host based on the instances already running on a host.
+#
+# If the configured filters and weighers do not need this information,
+# disabling this option will improve performance. It may also be
+# disabled when the tracking overhead proves too heavy, although this
+# will cause classes requiring host usage data to query the database
+# on each request instead.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect.
+#
+# NOTE: In a multi-cell (v2) setup where the cell MQ is separated from
+# the top-level, computes cannot directly communicate with the
+# scheduler. Thus, this option cannot be enabled in that scenario. See
+# also the [workarounds]/disable_group_policy_check_upcall option.
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/scheduler_tracks_instance_changes
+#track_instance_changes = true
+
+#
+# Filters that the scheduler can use.
+#
+# An unordered list of the filter classes the nova scheduler may
+# apply. Only the filters specified in the 'enabled_filters' option
+# will be used, but any filter appearing in that option must also be
+# included in this list.
+#
+# By default, this is set to all filters that are included with nova.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect.
+#
+# Possible values:
+#
+# * A list of zero or more strings, where each string corresponds to
+#   the name of a filter that may be used for selecting a host
+#
+# Related options:
+#
+# * enabled_filters
+# (multi valued)
+# Deprecated group/name - [DEFAULT]/scheduler_available_filters
+#available_filters = nova.scheduler.filters.all_filters
+available_filters=nova.scheduler.filters.all_filters
+available_filters=nova.scheduler.filters.pci_passthrough_filter.PciPassthroughFilter
+{% for filter in controller.get('scheduler_custom_filters', []) %}
+available_filters = {{ filter }}
+{% endfor %}
+
+#
+# Filters that the scheduler will use.
+#
+# An ordered list of filter class names that will be used for
+# filtering hosts. These filters will be applied in the order they are
+# listed, so place your most restrictive filters first to make the
+# filtering process more efficient.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect.
+#
+# Possible values:
+#
+# * A list of zero or more strings, where each string corresponds to
+#   the name of a filter to be used for selecting a host
+#
+# Related options:
+#
+# * All of the filters in this option *must* be present in the
+#   'available_filters' option, or a SchedulerHostFilterNotFound
+#   exception will be raised.
+# (list value)
+# Deprecated group/name - [DEFAULT]/scheduler_default_filters
+#enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
+enabled_filters={{ controller.scheduler_default_filters }}
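+#
+# As an illustrative sketch (a plausible pillar value, not a default
+# of this formula), rendering the line above with the PCI passthrough
+# filter appended might produce:
+#
+# enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,PciPassthroughFilter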
+
+# DEPRECATED:
+# Filters used for filtering baremetal hosts.
+#
+# Filters are applied in order, so place your most restrictive filters
+# first to make the filtering process more efficient.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect.
+#
+# Possible values:
+#
+# * A list of zero or more strings, where each string corresponds to
+#   the name of a filter to be used for selecting a baremetal host
+#
+# Related options:
+#
+# * If the 'scheduler_use_baremetal_filters' option is False, this
+#   option has no effect.
+# (list value)
+# Deprecated group/name - [DEFAULT]/baremetal_scheduler_default_filters
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason:
+# These filters were used to overcome some of the baremetal scheduling
+# limitations in Nova prior to the use of the Placement API. Now
+# scheduling will use the custom resource class defined for each
+# baremetal node to make its selection.
+#baremetal_enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ExactRamFilter,ExactDiskFilter,ExactCoreFilter
+
+# DEPRECATED:
+# Enable baremetal filters.
+#
+# Set this to True to tell the nova scheduler that it should use the
+# filters specified in the 'baremetal_enabled_filters' option. If you
+# are not scheduling baremetal nodes, leave this at the default
+# setting of False.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect.
+#
+# Related options:
+#
+# * If this option is set to True, then the filters specified in the
+#   'baremetal_enabled_filters' are used instead of the filters
+#   specified in 'enabled_filters'.
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/scheduler_use_baremetal_filters
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason:
+# These filters were used to overcome some of the baremetal scheduling
+# limitations in Nova prior to the use of the Placement API. Now
+# scheduling will use the custom resource class defined for each
+# baremetal node to make its selection.
+#use_baremetal_filters = false
+
+#
+# Weighers that the scheduler will use.
+#
+# Only hosts which pass the filters are weighed. The weight for any
+# host starts at 0, and the weighers order these hosts by adding to or
+# subtracting from the weight assigned by the previous weigher.
+# Weights may become negative. An instance will be scheduled to one of
+# the N most-weighted hosts, where N is 'scheduler_host_subset_size'.
+#
+# By default, this is set to all weighers that are included with Nova.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect.
+#
+# Possible values:
+#
+# * A list of zero or more strings, where each string corresponds to
+#   the name of a weigher that will be used for selecting a host
+# (list value)
+# Deprecated group/name - [DEFAULT]/scheduler_weight_classes
+#weight_classes = nova.scheduler.weights.all_weighers
+
+#
+# RAM weight multiplier ratio.
+#
+# This option determines how hosts with more or less available RAM are
+# weighed. A positive value will result in the scheduler preferring
+# hosts with more available RAM, and a negative number will result in
+# the scheduler preferring hosts with less available RAM. Another way
+# to look at it is that positive values for this option will tend to
+# spread instances across many hosts, while negative values will tend
+# to fill up (stack) hosts as much as possible before scheduling to a
+# less-used host. The absolute value, whether positive or negative,
+# controls how strong the RAM weigher is relative to other weighers.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect. Also
+# note that this setting only affects scheduling if the 'ram' weigher
+# is enabled.
+#
+# Possible values:
+#
+# * An integer or float value, where the value corresponds to the
+#   multiplier ratio for this weigher.
+# (floating point value)
+#ram_weight_multiplier = 1.0
+
+#
+# Disk weight multiplier ratio.
+#
+# Multiplier used for weighing free disk space. Negative numbers mean
+# to stack vs spread.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect. Also
+# note that this setting only affects scheduling if the 'disk' weigher
+# is enabled.
+#
+# Possible values:
+#
+# * An integer or float value, where the value corresponds to the
+#   multiplier ratio for this weigher.
+# (floating point value)
+#disk_weight_multiplier = 1.0
+
+#
+# IO operations weight multiplier ratio.
+#
+# This option determines how hosts with differing workloads are
+# weighed. Negative values, such as the default, will result in the
+# scheduler preferring hosts with lighter workloads, whereas positive
+# values will prefer hosts with heavier workloads. Another way to look
+# at it is that positive values for this option will tend to schedule
+# instances onto hosts that are already busy, while negative values
+# will tend to distribute the workload across more hosts. The absolute
+# value, whether positive or negative, controls how strong the io_ops
+# weigher is relative to other weighers.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use a different scheduler, this option has no effect. Also
+# note that this setting only affects scheduling if the 'io_ops'
+# weigher is enabled.
+#
+# Possible values:
+#
+# * An integer or float value, where the value corresponds to the
+#   multiplier ratio for this weigher.
+# (floating point value)
+#io_ops_weight_multiplier = -1.0
+
+#
+# PCI device affinity weight multiplier.
+#
+# The PCI device affinity weigher computes a weighting based on the
+# number of PCI devices on the host and the number of PCI devices
+# requested by the instance. The ``NUMATopologyFilter`` filter must be
+# enabled for this to have any significance. For more information,
+# refer to the filter documentation:
+#
+#     https://docs.openstack.org/nova/latest/user/filter-scheduler.html
+#
+# Possible values:
+#
+# * A positive integer or float value, where the value corresponds to
+#   the multiplier ratio for this weigher.
+# (floating point value)
+# Minimum value: 0
+#pci_weight_multiplier = 1.0
+
+#
+# Multiplier used for weighing hosts for group soft-affinity.
+#
+# Possible values:
+#
+# * An integer or float value, where the value corresponds to the
+#   weight multiplier for hosts with group soft affinity. Only
+#   positive values are meaningful, as negative values would make this
+#   behave as a soft anti-affinity weigher.
+# (floating point value)
+#soft_affinity_weight_multiplier = 1.0
+
+#
+# Multiplier used for weighing hosts for group soft-anti-affinity.
+#
+# Possible values:
+#
+# * An integer or float value, where the value corresponds to the
+#   weight multiplier for hosts with group soft anti-affinity. Only
+#   positive values are meaningful, as negative values would make this
+#   behave as a soft affinity weigher.
+# (floating point value)
+#soft_anti_affinity_weight_multiplier = 1.0
+
+#
+# Enable spreading the instances between hosts with the same best
+# weight.
+#
+# Enabling this is beneficial for cases when host_subset_size is 1
+# (the default), but there is a large number of hosts with the same
+# maximal weight. This scenario is common in Ironic deployments, where
+# there are typically many baremetal nodes with identical weights
+# returned to the scheduler. In such a case, enabling this option will
+# reduce contention and the chance of rescheduling events. At the same
+# time it will make the instance packing (even in the unweighed case)
+# less dense.
+# (boolean value)
+#shuffle_best_same_weighed_hosts = false
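+# Illustrative override (not part of the upstream sample): in an
+# Ironic deployment with many identically-weighed baremetal nodes,
+# spreading picks could be enabled with:
+#
+# shuffle_best_same_weighed_hosts = true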
+
+#
+# The default architecture to be used when using the image properties
+# filter.
+#
+# When using the ImagePropertiesFilter, it is possible that you want
+# to define
+# a default architecture to make the user experience easier and avoid
+# having
+# something like x86_64 images landing on aarch64 compute nodes
+# because the
+# user did not specify the 'hw_architecture' property in Glance.
+#
+# Possible values:
+#
+# * CPU Architectures such as x86_64, aarch64, s390x.
+# (string value)
+# Possible values:
+# alpha - <No description provided>
+# armv6 - <No description provided>
+# armv7l - <No description provided>
+# armv7b - <No description provided>
+# aarch64 - <No description provided>
+# cris - <No description provided>
+# i686 - <No description provided>
+# ia64 - <No description provided>
+# lm32 - <No description provided>
+# m68k - <No description provided>
+# microblaze - <No description provided>
+# microblazeel - <No description provided>
+# mips - <No description provided>
+# mipsel - <No description provided>
+# mips64 - <No description provided>
+# mips64el - <No description provided>
+# openrisc - <No description provided>
+# parisc - <No description provided>
+# parisc64 - <No description provided>
+# ppc - <No description provided>
+# ppcle - <No description provided>
+# ppc64 - <No description provided>
+# ppc64le - <No description provided>
+# ppcemb - <No description provided>
+# s390 - <No description provided>
+# s390x - <No description provided>
+# sh4 - <No description provided>
+# sh4eb - <No description provided>
+# sparc - <No description provided>
+# sparc64 - <No description provided>
+# unicore32 - <No description provided>
+# x86_64 - <No description provided>
+# xtensa - <No description provided>
+# xtensaeb - <No description provided>
+#image_properties_default_architecture = <None>
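+# Illustrative example (hypothetical choice): default all images that
+# lack an 'hw_architecture' property to x86_64:
+#
+# image_properties_default_architecture = x86_64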
+
+#
+# List of UUIDs for images that can only be run on certain hosts.
+#
+# If there is a need to restrict some images to only run on certain
+# designated
+# hosts, list those image UUIDs here.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use
+# a different scheduler, this option has no effect. Also note that
+# this setting
+# only affects scheduling if the 'IsolatedHostsFilter' filter is
+# enabled.
+#
+# Possible values:
+#
+# * A list of UUID strings, where each string corresponds to the UUID
+# of an
+# image
+#
+# Related options:
+#
+# * scheduler/isolated_hosts
+# * scheduler/restrict_isolated_hosts_to_isolated_images
+# (list value)
+#isolated_images =
+
+#
+# List of hosts that can only run certain images.
+#
+# If there is a need to restrict some images to only run on certain
+# designated
+# hosts, list those host names here.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use
+# a different scheduler, this option has no effect. Also note that
+# this setting
+# only affects scheduling if the 'IsolatedHostsFilter' filter is
+# enabled.
+#
+# Possible values:
+#
+# * A list of strings, where each string corresponds to the name of a
+# host
+#
+# Related options:
+#
+# * scheduler/isolated_images
+# * scheduler/restrict_isolated_hosts_to_isolated_images
+# (list value)
+#isolated_hosts =
+
+#
+# Prevent non-isolated images from being built on isolated hosts.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use
+# a different scheduler, this option has no effect. Also note that
+# this setting
+# only affects scheduling if the 'IsolatedHostsFilter' filter is
+# enabled. Even
+# then, this option doesn't affect the behavior of requests for
+# isolated images,
+# which will *always* be restricted to isolated hosts.
+#
+# Related options:
+#
+# * scheduler/isolated_images
+# * scheduler/isolated_hosts
+# (boolean value)
+#restrict_isolated_hosts_to_isolated_images = true
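+# Illustrative example (hypothetical UUIDs and host names): pin two
+# images to two dedicated hosts and keep all other images off those
+# hosts:
+#
+# isolated_images = 11111111-2222-3333-4444-555555555555,66666666-7777-8888-9999-000000000000
+# isolated_hosts = compute-isolated-01,compute-isolated-02
+# restrict_isolated_hosts_to_isolated_images = true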
+
+#
+# Image property namespace for use in the host aggregate.
+#
+# Images and hosts can be configured so that certain images can only
+# be scheduled
+# to hosts in a particular aggregate. This is done with metadata
+# values set on
+# the host aggregate that are identified by beginning with the value
+# of this
+# option. If the host is part of an aggregate with such a metadata
+# key, the image
+# in the request spec must have the value of that metadata in its
+# properties in
+# order for the scheduler to consider the host as acceptable.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use
+# a different scheduler, this option has no effect. Also note that
+# this setting
+# only affects scheduling if the
+# 'aggregate_image_properties_isolation' filter is
+# enabled.
+#
+# Possible values:
+#
+# * A string, where the string corresponds to an image property
+# namespace
+#
+# Related options:
+#
+# * aggregate_image_properties_isolation_separator
+# (string value)
+#aggregate_image_properties_isolation_namespace = <None>
+
+#
+# Separator character(s) for image property namespace and name.
+#
+# When using the aggregate_image_properties_isolation filter, the
+# relevant
+# metadata keys are prefixed with the namespace defined in the
+# aggregate_image_properties_isolation_namespace configuration option
+# plus a
+# separator. This option defines the separator to be used.
+#
+# This option is only used by the FilterScheduler and its subclasses;
+# if you use
+# a different scheduler, this option has no effect. Also note that
+# this setting
+# only affects scheduling if the
+# 'aggregate_image_properties_isolation' filter
+# is enabled.
+#
+# Possible values:
+#
+# * A string, where the string corresponds to an image property
+# namespace
+# separator character
+#
+# Related options:
+#
+# * aggregate_image_properties_isolation_namespace
+# (string value)
+#aggregate_image_properties_isolation_separator = .
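+# Illustrative example (hypothetical namespace): with the settings
+# below, an aggregate metadata key 'imgprop.os_distro=ubuntu' only
+# admits images whose 'os_distro' property equals 'ubuntu':
+#
+# aggregate_image_properties_isolation_namespace = imgprop
+# aggregate_image_properties_isolation_separator = .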
+
+
+[glance]
+# Configuration options for the Image service
+
+#
+# From nova.conf
+#
+
+#
+# List of glance api server endpoints available to nova.
+#
+# https is used for ssl-based glance api servers.
+#
+# NOTE: The preferred mechanism for endpoint discovery is via
+# keystoneauth1
+# loading options. Only use api_servers if you need multiple endpoints
+# and are
+# unable to use a load balancer for some reason.
+#
+# Possible values:
+#
+# * A list of any fully qualified url of the form
+# "scheme://hostname:port[/path]"
+# (i.e. "http://10.0.1.0:9292" or "https://my.glance.server/image").
+# (list value)
+#api_servers = <None>
+api_servers = {{ controller.glance.get('protocol', 'http') }}://{{ controller.glance.host }}:{{ controller.glance.get('port', 9292) }}
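+# For reference, with hypothetical pillar values glance:host=10.0.0.5
+# and the protocol and port defaults, the template line above renders
+# to:
+#
+# api_servers = http://10.0.0.5:9292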
+
+#
+# Enable glance operation retries.
+#
+# Specifies the number of retries when uploading / downloading
+# an image to / from glance. 0 means no retries.
+# (integer value)
+# Minimum value: 0
+#num_retries = 0
+
+# DEPRECATED:
+# List of url schemes that can be directly accessed.
+#
+# This option specifies a list of url schemes that can be downloaded
+# directly via the direct_url. This direct_url can be fetched from
+# image metadata and can be used by nova to get the
+# image more efficiently. nova-compute could benefit from this by
+# invoking a copy when it has access to the same file system as
+# glance.
+#
+# Possible values:
+#
+# * [file], Empty list (default)
+# (list value)
+# This option is deprecated for removal since 17.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# This was originally added for the 'nova.image.download.file'
+# FileTransfer
+# extension which was removed in the 16.0.0 Pike release. The
+# 'nova.image.download.modules' extension point is not maintained
+# and there is no indication of its use in production clouds.
+#allowed_direct_url_schemes =
+
+#
+# Enable image signature verification.
+#
+# nova uses the image signature metadata from glance and verifies the
+# signature
+# of a signed image while downloading that image. If the image
+# signature cannot
+# be verified or if the image signature metadata is either incomplete
+# or
+# unavailable, then nova will not boot the image and instead will
+# place the
+# instance into an error state. This provides end users with stronger
+# assurances
+# of the integrity of the image data they are using to create servers.
+#
+# Related options:
+#
+# * The options in the `key_manager` group, as the key_manager is used
+# for the signature validation.
+# * Both enable_certificate_validation and
+# default_trusted_certificate_ids
+# below depend on this option being enabled.
+# (boolean value)
+{%- if controller.get('barbican', {}).get('enabled', False) %}
+verify_glance_signatures=true
+{%- else %}
+#verify_glance_signatures=false
+{%- endif %}
+
+# DEPRECATED:
+# Enable certificate validation for image signature verification.
+#
+# During image signature verification nova will first verify the
+# validity of the
+# image's signing certificate using the set of trusted certificates
+# associated
+# with the instance. If certificate validation fails, signature
+# verification
+# will not be performed and the image will be placed into an error
+# state. This
+# provides end users with stronger assurances that the image data is
+# unmodified
+# and trustworthy. If left disabled, image signature verification can
+# still
+# occur but the end user will not have any assurance that the signing
+# certificate used to generate the image signature is still
+# trustworthy.
+#
+# Related options:
+#
+# * This option only takes effect if verify_glance_signatures is
+# enabled.
+# * The value of default_trusted_certificate_ids may be used when this
+# option
+# is enabled.
+# (boolean value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# This option is intended to ease the transition for deployments
+# leveraging
+# image signature verification. The intended state long-term is for
+# signature
+# verification and certificate validation to always happen together.
+#enable_certificate_validation = false
+
+#
+# List of certificate IDs for certificates that should be trusted.
+#
+# May be used as a default list of trusted certificate IDs for
+# certificate
+# validation. The value of this option will be ignored if the user
+# provides a
+# list of trusted certificate IDs with an instance API request. The
+# value of
+# this option will be persisted with the instance data if signature
+# verification
+# and certificate validation are enabled and if the user did not
+# provide an
+# alternative list. If left empty when certificate validation is
+# enabled, the user must provide a list of trusted certificate IDs,
+# otherwise certificate validation will fail.
+#
+# Related options:
+#
+# * The value of this option may be used if both
+# verify_glance_signatures and
+# enable_certificate_validation are enabled.
+# (list value)
+#default_trusted_certificate_ids =
+
+# Enable or disable debug logging with glanceclient. (boolean value)
+#debug = false
+
+# PEM encoded Certificate Authority to use when verifying HTTPs
+# connections. (string value)
+#cafile = <None>
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Timeout value for http requests (integer value)
+#timeout = <None>
+
+# The default service_type for endpoint URL discovery. (string value)
+#service_type = image
+
+# The default service_name for endpoint URL discovery. (string value)
+#service_name = <None>
+
+# List of interfaces, in order of preference, for endpoint URL. (list
+# value)
+#valid_interfaces = internal,public
+
+# The default region_name for endpoint URL discovery. (string value)
+#region_name = <None>
+
+# Always use this endpoint URL for requests for this client. NOTE: The
+# unversioned endpoint should be specified here; to request a
+# particular API version, use the `version`, `min-version`, and/or
+# `max-version` options. (string value)
+#endpoint_override = <None>
+
+
+[guestfs]
+#
+# libguestfs is a set of tools for accessing and modifying virtual
+# machine (VM) disk images. You can use this for viewing and editing
+# files inside guests, scripting changes to VMs, monitoring disk
+# used/free statistics, creating guests, P2V, V2V, performing backups,
+# cloning VMs, building VMs, formatting disks and resizing disks.
+
+#
+# From nova.conf
+#
+
+#
+# Enables/disables guestfs logging.
+#
+# This configures guestfs to emit debug messages and push them to the
+# OpenStack logging system. When set to True, it traces libguestfs API
+# calls and enables verbose debug messages. In order to use the above
+# feature, the "libguestfs" package must be installed.
+#
+# Related options:
+# Since libguestfs accesses and modifies VMs managed by libvirt, the
+# options below should be set to give access to those VMs.
+# * libvirt.inject_key
+# * libvirt.inject_partition
+# * libvirt.inject_password
+# (boolean value)
+#debug = false
+
+
+[hyperv]
+#
+# The hyperv feature allows you to configure the Hyper-V hypervisor
+# driver to be used within an OpenStack deployment.
+
+#
+# From nova.conf
+#
+
+#
+# Dynamic memory ratio
+#
+# Enables dynamic memory allocation (ballooning) when set to a value
+# greater than 1. The value expresses the ratio between the total RAM
+# assigned to an instance and its startup RAM amount. For example a
+# ratio of 2.0 for an instance with 1024MB of RAM implies 512MB of
+# RAM allocated at startup.
+#
+# Possible values:
+#
+# * 1.0: Disables dynamic memory allocation (Default).
+# * Float values greater than 1.0: Enables allocation of total implied
+# RAM divided by this value for startup.
+# (floating point value)
+#dynamic_memory_ratio = 1.0
+
+#
+# Enable instance metrics collection
+#
+# Enables metrics collections for an instance by using Hyper-V's
+# metric APIs. Collected data can be retrieved by other apps and
+# services, e.g.: Ceilometer.
+# (boolean value)
+#enable_instance_metrics_collection = false
+
+#
+# Instances path share
+#
+# The name of a Windows share mapped to the "instances_path" dir
+# and used by the resize feature to copy files to the target host.
+# If left blank, an administrative share (hidden network share) will
+# be used, looking for the same "instances_path" used locally.
+#
+# Possible values:
+#
+# * "": An administrative share will be used (Default).
+# * Name of a Windows share.
+#
+# Related options:
+#
+# * "instances_path": The directory which will be used if this option
+# here is left blank.
+# (string value)
+#instances_path_share =
+
+#
+# Limit CPU features
+#
+# This flag is needed to support live migration to hosts with
+# different CPU features and is checked during instance creation
+# in order to limit the CPU features used by the instance.
+# (boolean value)
+#limit_cpu_features = false
+
+#
+# Mounted disk query retry count
+#
+# The number of times to retry checking for a mounted disk.
+# The query runs until the device can be found or the retry
+# count is reached.
+#
+# Possible values:
+#
+# * Positive integer values. Values greater than 1 are recommended
+# (Default: 10).
+#
+# Related options:
+#
+# * Time interval between disk mount retries is declared with
+# "mounted_disk_query_retry_interval" option.
+# (integer value)
+# Minimum value: 0
+#mounted_disk_query_retry_count = 10
+
+#
+# Mounted disk query retry interval
+#
+# Interval between checks for a mounted disk, in seconds.
+#
+# Possible values:
+#
+# * Time in seconds (Default: 5).
+#
+# Related options:
+#
+# * This option is meaningful when the mounted_disk_query_retry_count
+# is greater than 1.
+# * The retry loop runs with mounted_disk_query_retry_count and
+# mounted_disk_query_retry_interval configuration options.
+# (integer value)
+# Minimum value: 0
+#mounted_disk_query_retry_interval = 5
+
+#
+# Power state check timeframe
+#
+# The timeframe to be checked for instance power state changes.
+# This option is used to fetch the state of the instance from Hyper-V
+# through the WMI interface, within the specified timeframe.
+#
+# Possible values:
+#
+# * Timeframe in seconds (Default: 60).
+# (integer value)
+# Minimum value: 0
+#power_state_check_timeframe = 60
+
+#
+# Power state event polling interval
+#
+# Instance power state change event polling frequency. Sets the
+# listener interval for power state events to the given value.
+# This option enhances the internal lifecycle notifications of
+# instances that reboot themselves. It is unlikely that an operator
+# has to change this value.
+#
+# Possible values:
+#
+# * Time in seconds (Default: 2).
+# (integer value)
+# Minimum value: 0
+#power_state_event_polling_interval = 2
+
+#
+# qemu-img command
+#
+# qemu-img is required for some of the image related operations
+# like converting between different image types. You can get it
+# from here: (http://qemu.weilnetz.de/) or you can install the
+# Cloudbase OpenStack Hyper-V Compute Driver
+# (https://cloudbase.it/openstack-hyperv-driver/) which automatically
+# sets the proper path for this config option. You can either give the
+# full path of qemu-img.exe or set its path in the PATH environment
+# variable and leave this option to the default value.
+#
+# Possible values:
+#
+# * Name of the qemu-img executable, in case it is in the same
+# directory as the nova-compute service or its path is in the
+# PATH environment variable (Default).
+# * Path of qemu-img command (DRIVELETTER:\PATH\TO\QEMU-IMG\COMMAND).
+#
+# Related options:
+#
+# * If the config_drive_cdrom option is False, qemu-img will be used to
+# convert the ISO to a VHD, otherwise the configuration drive will
+# remain an ISO. To use configuration drive with Hyper-V, you must
+# set the mkisofs_cmd value to the full path to an mkisofs.exe
+# installation.
+# (string value)
+#qemu_img_cmd = qemu-img.exe
+
+#
+# External virtual switch name
+#
+# The Hyper-V Virtual Switch is a software-based layer-2 Ethernet
+# network switch that is available with the installation of the
+# Hyper-V server role. The switch includes programmatically managed
+# and extensible capabilities to connect virtual machines to both
+# virtual networks and the physical network. In addition, Hyper-V
+# Virtual Switch provides policy enforcement for security, isolation,
+# and service levels. The vSwitch represented by this config option
+# must be an external one (not internal or private).
+#
+# Possible values:
+#
+# * If not provided, the first of a list of available vswitches
+# is used. This list is queried using WQL.
+# * Virtual switch name.
+# (string value)
+#vswitch_name = <None>
+
+#
+# Wait soft reboot seconds
+#
+# Number of seconds to wait for instance to shut down after soft
+# reboot request is made. We fall back to hard reboot if the instance
+# does not shut down within this window.
+#
+# Possible values:
+#
+# * Time in seconds (Default: 60).
+# (integer value)
+# Minimum value: 0
+#wait_soft_reboot_seconds = 60
+
+#
+# Configuration drive cdrom
+#
+# OpenStack can be configured to write instance metadata to
+# a configuration drive, which is then attached to the
+# instance before it boots. The configuration drive can be
+# attached as a disk drive (default) or as a CD drive.
+#
+# Possible values:
+#
+# * True: Attach the configuration drive image as a CD drive.
+# * False: Attach the configuration drive image as a disk drive
+# (Default).
+#
+# Related options:
+#
+# * This option is meaningful with the force_config_drive option set
+#   to 'True' or when the REST API call to create an instance has the
+#   '--config-drive=True' flag.
+# * config_drive_format option must be set to 'iso9660' in order to
+#   use a CD drive as the configuration drive image.
+# * To use configuration drive with Hyper-V, you must set the
+# mkisofs_cmd value to the full path to an mkisofs.exe installation.
+# Additionally, you must set the qemu_img_cmd value to the full path
+# to a qemu-img command installation.
+# * You can configure the Compute service to always create a
+# configuration
+# drive by setting the force_config_drive option to 'True'.
+# (boolean value)
+#config_drive_cdrom = false
+
+#
+# Configuration drive inject password
+#
+# Enables setting the admin password in the configuration drive image.
+#
+# Related options:
+#
+# * This option is meaningful when used with other options that enable
+# configuration drive usage with Hyper-V, such as
+# force_config_drive.
+# * Currently, the only accepted config_drive_format is 'iso9660'.
+# (boolean value)
+#config_drive_inject_password = false
+
+#
+# Volume attach retry count
+#
+# The number of times to retry attaching a volume. Volume attachment
+# is retried until success or the given retry count is reached.
+#
+# Possible values:
+#
+# * Positive integer values (Default: 10).
+#
+# Related options:
+#
+# * Time interval between attachment attempts is declared with
+# volume_attach_retry_interval option.
+# (integer value)
+# Minimum value: 0
+#volume_attach_retry_count = 10
+
+#
+# Volume attach retry interval
+#
+# Interval between volume attachment attempts, in seconds.
+#
+# Possible values:
+#
+# * Time in seconds (Default: 5).
+#
+# Related options:
+#
+# * This option is meaningful when volume_attach_retry_count
+# is greater than 1.
+# * The retry loop runs with volume_attach_retry_count and
+# volume_attach_retry_interval configuration options.
+# (integer value)
+# Minimum value: 0
+#volume_attach_retry_interval = 5
+
+#
+# Enable RemoteFX feature
+#
+# This requires at least one DirectX 11 capable graphics adapter for
+# Windows / Hyper-V Server 2012 R2 or newer, and the RDS-Virtualization
+# feature has to be enabled.
+#
+# Instances with RemoteFX can be requested with the following flavor
+# extra specs:
+#
+# **os:resolution**. Guest VM screen resolution size. Acceptable
+# values::
+#
+# 1024x768, 1280x1024, 1600x1200, 1920x1200, 2560x1600, 3840x2160
+#
+# ``3840x2160`` is only available on Windows / Hyper-V Server 2016.
+#
+# **os:monitors**. Guest VM number of monitors. Acceptable values::
+#
+# [1, 4] - Windows / Hyper-V Server 2012 R2
+# [1, 8] - Windows / Hyper-V Server 2016
+#
+# **os:vram**. Guest VM VRAM amount. Only available on
+# Windows / Hyper-V Server 2016. Acceptable values::
+#
+# 64, 128, 256, 512, 1024
+# (boolean value)
+#enable_remotefx = false
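+# Illustrative example (hypothetical flavor name): the extra specs
+# described above can be set with the standard openstack CLI:
+#
+# openstack flavor set remotefx-flavor \
+#   --property os:resolution=1920x1200 \
+#   --property os:monitors=2 \
+#   --property os:vram=512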
+
+#
+# Use multipath connections when attaching iSCSI or FC disks.
+#
+# This requires the Multipath IO Windows feature to be enabled. MPIO
+# must be
+# configured to claim such devices.
+# (boolean value)
+#use_multipath_io = false
+
+#
+# List of iSCSI initiators that will be used for establishing iSCSI
+# sessions.
+#
+# If none are specified, the Microsoft iSCSI initiator service will
+# choose the
+# initiator.
+# (list value)
+#iscsi_initiator_list =
+
+{% if controller.ironic is defined -%}
+[ironic]
+#
+# Configuration options for Ironic driver (Bare Metal).
+# If using the Ironic driver, the following options must be set:
+# * auth_type
+# * auth_url
+# * project_name
+# * username
+# * password
+# * project_domain_id or project_domain_name
+# * user_domain_id or user_domain_name
+
+#
+# From nova.conf
+#
+
+# DEPRECATED: URL override for the Ironic API endpoint. (uri value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Endpoint lookup uses the service catalog via common
+# keystoneauth1 Adapter configuration options. In the current release,
+# api_endpoint will override this behavior, but will be ignored and/or
+# removed in a future release. To achieve the same result, use the
+# endpoint_override option instead.
+#api_endpoint = http://ironic.example.org:6385/
+api_endpoint={{ controller.ironic.get('protocol', 'http') }}://{{ controller.ironic.host }}:{{ controller.ironic.port }}
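+# For reference, with hypothetical pillar values ironic:host=10.0.0.8
+# and ironic:port=6385 and the default protocol, the template line
+# above renders to:
+#
+# api_endpoint = http://10.0.0.8:6385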
+
+#
+# The number of times to retry when a request conflicts.
+# If set to 0, only try once, no retries.
+#
+# Related options:
+#
+# * api_retry_interval
+# (integer value)
+# Minimum value: 0
+#api_max_retries = 60
+
+#
+# The number of seconds to wait before retrying the request.
+#
+# Related options:
+#
+# * api_max_retries
+# (integer value)
+# Minimum value: 0
+#api_retry_interval = 2
+
+# Timeout (seconds) to wait for node serial console state changed. Set
+# to 0 to disable timeout. (integer value)
+# Minimum value: 0
+#serial_console_state_timeout = 10
+
+# PEM encoded Certificate Authority to use when verifying HTTPs
+# connections. (string value)
+#cafile = <None>
+{%- if controller.ironic.get('protocol', 'http') == 'https' %}
+cafile={{ controller.identity.get('cacert_file', controller.cacert_file) }}
+{%- endif %}
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Timeout value for http requests (integer value)
+#timeout = <None>
+
+# Authentication type to load (string value)
+# Deprecated group/name - [ironic]/auth_plugin
+#auth_type = <None>
+auth_type={{ controller.ironic.auth_type }}
+
+# Config Section from which to load plugin specific options (string
+# value)
+#auth_section = <None>
+
+# Authentication URL (string value)
+#auth_url = <None>
+auth_url={{ controller.identity.get('protocol', 'http') }}://{{ controller.identity.host }}:{{ controller.identity.port }}/v3
+
+# Scope for system operations (string value)
+#system_scope = <None>
+
+# Domain ID to scope to (string value)
+#domain_id = <None>
+
+# Domain name to scope to (string value)
+#domain_name = <None>
+
+# Project ID to scope to (string value)
+#project_id = <None>
+
+# Project name to scope to (string value)
+#project_name = <None>
+project_name={{ controller.identity.tenant }}
+
+# Domain ID containing project (string value)
+#project_domain_id = <None>
+
+# Domain name containing project (string value)
+#project_domain_name = <None>
+project_domain_name={{ controller.ironic.project_domain_name }}
+
+# Trust ID (string value)
+#trust_id = <None>
+
+# User ID (string value)
+#user_id = <None>
+
+# Username (string value)
+# Deprecated group/name - [ironic]/user_name
+#username = <None>
+username={{ controller.ironic.user }}
+
+# User's domain id (string value)
+#user_domain_id = <None>
+
+# User's domain name (string value)
+#user_domain_name = <None>
+user_domain_name={{ controller.ironic.user_domain_name }}
+
+
+# User's password (string value)
+#password = <None>
+password={{ controller.ironic.password }}
+
+# The default service_type for endpoint URL discovery. (string value)
+#service_type = baremetal
+
+# The default service_name for endpoint URL discovery. (string value)
+#service_name = <None>
+
+# List of interfaces, in order of preference, for endpoint URL. (list
+# value)
+#valid_interfaces = internal,public
+
+# The default region_name for endpoint URL discovery. (string value)
+#region_name = <None>
+
+# Always use this endpoint URL for requests for this client. NOTE: The
+# unversioned endpoint should be specified here; to request a
+# particular API version, use the `version`, `min-version`, and/or
+# `max-version` options. (string value)
+# Deprecated group/name - [ironic]/api_endpoint
+#endpoint_override = <None>
+{%- endif %}
+
+
+[key_manager]
+
+#
+# From nova.conf
+#
+
+#
+# Fixed key returned by key manager, specified in hex.
+#
+# Possible values:
+#
+# * Empty string or a key in hex value
+# (string value)
+#fixed_key = <None>
+{%- if controller.get('barbican', {}).get('enabled', False) %}
+api_class=castellan.key_manager.barbican_key_manager.BarbicanKeyManager
+{%- endif %}
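+# For reference: when the pillar sets barbican:enabled to true, the
+# rendered config contains the api_class line above, pointing nova at
+# the castellan Barbican key manager; otherwise no api_class is
+# emitted and the default backend applies.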
+
+# Specify the key manager implementation. Options are "barbican" and
+# "vault". Default is "barbican". Will support the values earlier
+# set using [key_manager]/api_class for some time. (string value)
+# Deprecated group/name - [key_manager]/api_class
+#backend = barbican
+
+# The type of authentication credential to create. Possible values are
+# 'token', 'password', 'keystone_token', and 'keystone_password'.
+# Required if no context is passed to the credential factory. (string
+# value)
+#auth_type = <None>
+
+# Token for authentication. Required for 'token' and 'keystone_token'
+# auth_type if no context is passed to the credential factory. (string
+# value)
+#token = <None>
+
+# Username for authentication. Required for 'password' auth_type.
+# Optional for the 'keystone_password' auth_type. (string value)
+#username = <None>
+
+# Password for authentication. Required for 'password' and
+# 'keystone_password' auth_type. (string value)
+#password = <None>
+
+# Use this endpoint to connect to Keystone. (string value)
+#auth_url = <None>
+
+# User ID for authentication. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#user_id = <None>
+
+# User's domain ID for authentication. Optional for 'keystone_token'
+# and 'keystone_password' auth_type. (string value)
+#user_domain_id = <None>
+
+# User's domain name for authentication. Optional for 'keystone_token'
+# and 'keystone_password' auth_type. (string value)
+#user_domain_name = <None>
+
+# Trust ID for trust scoping. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#trust_id = <None>
+
+# Domain ID for domain scoping. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#domain_id = <None>
+
+# Domain name for domain scoping. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#domain_name = <None>
+
+# Project ID for project scoping. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#project_id = <None>
+
+# Project name for project scoping. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#project_name = <None>
+
+# Project's domain ID for project. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#project_domain_id = <None>
+
+# Project's domain name for project. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#project_domain_name = <None>
+
+# Allow fetching a new token if the current one is going to expire.
+# Optional for 'keystone_token' and 'keystone_password' auth_type.
+# (boolean value)
+#reauthenticate = true
+
+
+[keystone]
+# Configuration options for the identity service
+
+#
+# From nova.conf
+#
+
+# PEM encoded Certificate Authority to use when verifying HTTPs
+# connections. (string value)
+#cafile = <None>
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Timeout value for http requests (integer value)
+#timeout = <None>
+
+# The default service_type for endpoint URL discovery. (string value)
+#service_type = identity
+
+# The default service_name for endpoint URL discovery. (string value)
+#service_name = <None>
+
+# List of interfaces, in order of preference, for endpoint URL. (list
+# value)
+#valid_interfaces = internal,public
+
+# The default region_name for endpoint URL discovery. (string value)
+#region_name = <None>
+
+# Always use this endpoint URL for requests for this client. NOTE: The
+# unversioned endpoint should be specified here; to request a
+# particular API version, use the `version`, `min-version`, and/or
+# `max-version` options. (string value)
+#endpoint_override = <None>
+
+
+[libvirt]
+#
+# Libvirt options allow the cloud administrator to configure the
+# libvirt hypervisor driver to be used within an OpenStack deployment.
+#
+# Almost all of the libvirt config options are influenced by the
+# ``virt_type`` config option, which describes the virtualization type
+# (or so-called domain type) libvirt should use for specific features
+# such as live migration and snapshots.
+
+#
+# From nova.conf
+#
+virt_type = kvm
+
+inject_partition = -1
+
+#
+# The ID of the image to boot from to rescue data from a corrupted
+# instance.
+#
+# If the rescue REST API operation doesn't provide an ID of an image
+# to
+# use, the image which is referenced by this ID is used. If this
+# option is not set, the image from the instance is used.
+#
+# Possible values:
+#
+# * An ID of an image or nothing. If it points to an *Amazon Machine
+#   Image* (AMI), consider setting the config options
+#   ``rescue_kernel_id`` and ``rescue_ramdisk_id`` too. If nothing is
+#   set, the image of the instance is used.
+#
+# Related options:
+#
+# * ``rescue_kernel_id``: If the chosen rescue image allows the
+# separate
+# definition of its kernel disk, the value of this option is used,
+# if specified. This is the case when *Amazon*'s AMI/AKI/ARI image
+# format is used for the rescue image.
+# * ``rescue_ramdisk_id``: If the chosen rescue image allows the
+# separate
+# definition of its RAM disk, the value of this option is used, if
+# specified. This is the case when *Amazon*'s AMI/AKI/ARI image
+# format is used for the rescue image.
+# (string value)
+#rescue_image_id = <None>
+
+#
+# The ID of the kernel (AKI) image to use with the rescue image.
+#
+# If the chosen rescue image allows the separate definition of its
+# kernel
+# disk, the value of this option is used, if specified. This is the
+# case
+# when *Amazon*'s AMI/AKI/ARI image format is used for the rescue
+# image.
+#
+# Possible values:
+#
+# * An ID of a kernel image or nothing. If nothing is specified, the
+#   kernel disk from the instance is used if it was launched with one.
+#
+# Related options:
+#
+# * ``rescue_image_id``: If that option points to an image in
+# *Amazon*'s
+# AMI/AKI/ARI image format, it's useful to use ``rescue_kernel_id``
+# too.
+# (string value)
+#rescue_kernel_id = <None>
+
+#
+# The ID of the RAM disk (ARI) image to use with the rescue image.
+#
+# If the chosen rescue image allows the separate definition of its RAM
+# disk, the value of this option is used, if specified. This is the
+# case
+# when *Amazon*'s AMI/AKI/ARI image format is used for the rescue
+# image.
+#
+# Possible values:
+#
+# * An ID of a RAM disk image or nothing. If nothing is specified, the
+# RAM
+# disk from the instance is used if it was launched with one.
+#
+# Related options:
+#
+# * ``rescue_image_id``: If that option points to an image in
+# *Amazon*'s
+# AMI/AKI/ARI image format, it's useful to use ``rescue_ramdisk_id``
+# too.
+# (string value)
+#rescue_ramdisk_id = <None>
+
+#
+# Describes the virtualization type (or so called domain type) libvirt
+# should
+# use.
+#
+# The choice of this type must match the underlying virtualization
+# strategy
+# you have chosen for this host.
+#
+# Possible values:
+#
+# * See the predefined set of case-sensitive values.
+#
+# Related options:
+#
+# * ``connection_uri``: depends on this
+# * ``disk_prefix``: depends on this
+# * ``cpu_mode``: depends on this
+# * ``cpu_model``: depends on this
+# (string value)
+# Possible values:
+# kvm - <No description provided>
+# lxc - <No description provided>
+# qemu - <No description provided>
+# uml - <No description provided>
+# xen - <No description provided>
+# parallels - <No description provided>
+#virt_type = kvm
+
+#
+# Overrides the default libvirt URI of the chosen virtualization type.
+#
+# If set, Nova will use this URI to connect to libvirt.
+#
+# Possible values:
+#
+# * A URI like ``qemu:///system`` or ``xen+ssh://oirase/``, for
+#   example. This is only necessary if the URI differs from the
+#   commonly known URIs for the chosen virtualization type.
+#
+# Related options:
+#
+# * ``virt_type``: Influences what is used as default value here.
+# (string value)
+#connection_uri =
+
+#
+# Algorithm used to hash the injected password.
+# Note that it must be supported by libc on the compute host
+# _and_ by libc inside *any guest image* that will be booted by this
+# compute host with requested password injection.
+# In case the specified algorithm is not supported by libc on the
+# compute host, a fallback to the DES algorithm will be performed.
+#
+# Related options:
+#
+# * ``inject_password``
+# * ``inject_partition``
+# (string value)
+# Possible values:
+# SHA-512 - <No description provided>
+# SHA-256 - <No description provided>
+# MD5 - <No description provided>
+#inject_password_algorithm = MD5
+
+#
+# Allow the injection of an admin password for an instance only during
+# the ``create`` and ``rebuild`` process.
+#
+# There is no agent needed within the image to do this. If
+# *libguestfs* is available on the host, it will be used. Otherwise
+# *nbd* is used. The file system of the image will be mounted and the
+# admin password, which is provided in the REST API call, will be
+# injected as the password for the root user. If no root user is
+# available, the instance won't be launched and an error is thrown.
+# Be aware that the injection is *not* possible when the instance gets
+# launched from a volume.
+#
+# Possible values:
+#
+# * True: Allows the injection.
+# * False (default): Disallows the injection. Any admin password
+#   provided via the REST API will be silently ignored.
+#
+# Related options:
+#
+# * ``inject_partition``: That option decides how the file system is
+#   discovered and used. It can also disable injection entirely.
+# (boolean value)
+#inject_password = false
+
+#
+# Allow the injection of an SSH key at boot time.
+#
+# There is no agent needed within the image to do this. If
+# *libguestfs* is
+# available on the host, it will be used. Otherwise *nbd* is used. The
+# file
+# system of the image will be mounted and the SSH key, which is
+# provided
+# in the REST API call will be injected as SSH key for the root user
+# and
+# appended to the ``authorized_keys`` of that user. The SELinux
+# context will
+# be set if necessary. Be aware that the injection is *not* possible
+# when the
+# instance gets launched from a volume.
+#
+# This config option will enable directly modifying the instance disk
+# and does
+# not affect what cloud-init may do using data from config_drive
+# option or the
+# metadata service.
+#
+# Related options:
+#
+# * ``inject_partition``: That option decides how the file system is
+#   discovered and used. It can also disable injection entirely.
+# (boolean value)
+#inject_key = false
+
+#
+# Determines how the file system is chosen for injecting data into it.
+#
+# *libguestfs* will be used as the first solution to inject data. If
+# it is not available on the host, the image will be locally mounted
+# on the host as a fallback solution. If libguestfs is not able to
+# determine the root partition (because there is more or less than one
+# root partition) or cannot mount the file system, it will result in
+# an error and the instance won't boot.
+#
+# Possible values:
+#
+# * -2 => disable the injection of data.
+# * -1 => find the root partition with the file system to mount with
+# libguestfs
+# * 0 => The image is not partitioned
+# * >0 => The number of the partition to use for the injection
+#
+# Related options:
+#
+# * ``inject_key``: Injection of an SSH key only works if
+#   ``inject_partition`` is set to a value greater than or equal
+#   to -1.
+# * ``inject_password``: Injection of an admin password only works if
+#   ``inject_partition`` is set to a value greater than or equal
+#   to -1.
+# * ``guestfs`` You can enable the debug log level of libguestfs with
+# this
+# config option. A more verbose output will help in debugging
+# issues.
+# * ``virt_type``: If you use ``lxc`` as virt_type it will be treated
+# as a
+# single partition image
+# (integer value)
+# Minimum value: -2
+#inject_partition = -2
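+# Illustrative combination (example values): let libguestfs find the
+# root partition and inject an SSH key into it:
+#
+# inject_partition = -1
+# inject_key = true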
+
+# DEPRECATED:
+# Enable a mouse cursor within graphical VNC or SPICE sessions.
+#
+# This will only be taken into account if the VM is fully virtualized
+# and VNC
+# and/or SPICE is enabled. If the node doesn't support a graphical
+# framebuffer,
+# then it is valid to set this to False.
+#
+# Related options:
+# * ``[vnc]enabled``: If VNC is enabled, ``use_usb_tablet`` will have
+# an effect.
+# * ``[spice]enabled`` + ``[spice].agent_enabled``: If SPICE is
+# enabled and the
+# spice agent is disabled, the config value of ``use_usb_tablet``
+# will have
+# an effect.
+# (boolean value)
+# This option is deprecated for removal since 14.0.0.
+# Its value may be silently ignored in the future.
+# Reason: This option is being replaced by the 'pointer_model' option.
+use_usb_tablet = true
+
+#
+# The IP address or hostname to be used as the target for live
+# migration traffic.
+#
+# If this option is set to None, the hostname of the migration target
+# compute
+# node will be used.
+#
+# This option is useful in environments where the live-migration
+# traffic can impact the network plane significantly. A separate
+# network for live-migration traffic can then use this config option
+# and avoid the impact on the management network.
+#
+# Possible values:
+#
+# * A valid IP address or hostname, else None.
+#
+# Related options:
+#
+# * ``live_migration_tunnelled``: The live_migration_inbound_addr
+# value is
+# ignored if tunneling is enabled.
+# (string value)
+#live_migration_inbound_addr = <None>
+
+# DEPRECATED:
+# Live migration target URI to use.
+#
+# Override the default libvirt live migration target URI (which is
+# dependent
+# on virt_type). Any included "%s" is replaced with the migration
+# target
+# hostname.
+#
+# If this option is set to None (which is the default), Nova will
+# automatically generate the `live_migration_uri` value based on the 4
+# supported `virt_type` values in the following list:
+#
+# * 'kvm': 'qemu+tcp://%s/system'
+# * 'qemu': 'qemu+tcp://%s/system'
+# * 'xen': 'xenmigr://%s/system'
+# * 'parallels': 'parallels+tcp://%s/system'
+#
+# Related options:
+#
+# * ``live_migration_inbound_addr``: If
+# ``live_migration_inbound_addr`` value
+# is not None and ``live_migration_tunnelled`` is False, the
+# ip/hostname
+# address of target compute node is used instead of
+# ``live_migration_uri`` as
+# the uri for live migration.
+# * ``live_migration_scheme``: If ``live_migration_uri`` is not set,
+# the scheme
+# used for live migration is taken from ``live_migration_scheme``
+# instead.
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# live_migration_uri is deprecated for removal in favor of two other
+# options that allow changing the live migration scheme and target
+# URI: ``live_migration_scheme`` and ``live_migration_inbound_addr``
+# respectively.
+#live_migration_uri = <None>
+
+#
+# URI scheme used for live migration.
+#
+# Override the default libvirt live migration scheme (which is
+# dependent on
+# virt_type). If this option is set to None, nova will automatically
+# choose a
+# sensible default based on the hypervisor. It is not recommended that
+# you change this unless you are very sure that the hypervisor
+# supports a particular scheme.
+#
+# Related options:
+#
+# * ``virt_type``: This option is meaningful only when ``virt_type``
+# is set to
+# `kvm` or `qemu`.
+# * ``live_migration_uri``: If ``live_migration_uri`` value is not
+# None, the
+# scheme used for live migration is taken from
+# ``live_migration_uri`` instead.
+# (string value)
+#live_migration_scheme = <None>
+
+#
+# Enable tunnelled migration.
+#
+# This option enables the tunnelled migration feature, where migration
+# data is
+# transported over the libvirtd connection. If enabled, we use the
+# VIR_MIGRATE_TUNNELLED migration flag, avoiding the need to configure
+# the network to allow direct hypervisor to hypervisor communication.
+# If False, use the native transport. If not set, Nova will choose a
+# sensible default based on, for example, the availability of native
+# encryption support in the hypervisor. Enabling this option will
+# definitely impact performance massively.
+#
+# Note that this option is NOT compatible with use of block migration.
+#
+# Related options:
+#
+# * ``live_migration_inbound_addr``: The live_migration_inbound_addr
+# value is
+# ignored if tunneling is enabled.
+# (boolean value)
+#live_migration_tunnelled = false
+
+#
+# Maximum bandwidth(in MiB/s) to be used during migration.
+#
+# If set to 0, the hypervisor will choose a suitable default. Some
+# hypervisors
+# do not support this feature and will return an error if bandwidth is
+# not 0.
+# Please refer to the libvirt documentation for further details.
+# (integer value)
+#live_migration_bandwidth = 0
+
+#
+# Maximum permitted downtime, in milliseconds, for live migration
+# switchover.
+#
+# Will be rounded up to a minimum of 100ms. You can increase this
+# value
+# if you want to allow live-migrations to complete faster, or avoid
+# live-migration timeout errors by allowing the guest to be paused for
+# longer during the live-migration switch over.
+#
+# Related options:
+#
+# * live_migration_completion_timeout
+# (integer value)
+# Minimum value: 100
+#live_migration_downtime = 500
+
+#
+# Number of incremental steps to reach max downtime value.
+#
+# Will be rounded up to a minimum of 3 steps.
+# (integer value)
+# Minimum value: 3
+#live_migration_downtime_steps = 10
+
+#
+# Time to wait, in seconds, between each step increase of the
+# migration
+# downtime.
+#
+# Minimum delay is 3 seconds. Value is per GiB of guest RAM + disk to
+# be
+# transferred, with lower bound of a minimum of 2 GiB per device.
+# (integer value)
+# Minimum value: 3
+#live_migration_downtime_delay = 75
+
+#
+# Time to wait, in seconds, for migration to successfully complete
+# transferring
+# data before aborting the operation.
+#
+# Value is per GiB of guest RAM + disk to be transferred, with lower
+# bound of
+# a minimum of 2 GiB. Should usually be larger than downtime delay *
+# downtime
+# steps. Set to 0 to disable timeouts.
+#
+# Related options:
+#
+# * live_migration_downtime
+# * live_migration_downtime_steps
+# * live_migration_downtime_delay
+# (integer value)
+# Note: This option can be changed without restarting.
+#live_migration_completion_timeout = 800
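+# Worked example (assuming the defaults above): for a guest with 6 GiB
+# of RAM + disk to transfer, the effective completion timeout is
+# 6 * 800 = 4800 seconds, and the delay between downtime steps is
+# 6 * 75 = 450 seconds.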
+
+# DEPRECATED:
+# Time to wait, in seconds, for migration to make forward progress in
+# transferring data before aborting the operation.
+#
+# Set to 0 to disable timeouts.
+#
+# This is deprecated, and now disabled by default because we have
+# found serious
+# bugs in this feature that caused false live-migration timeout
+# failures. This
+# feature will be removed or replaced in a future release.
+# (integer value)
+# Note: This option can be changed without restarting.
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Serious bugs found in this feature.
+#live_migration_progress_timeout = 0
+
+#
+# This option allows nova to switch an on-going live migration to
+# post-copy
+# mode, i.e., switch the active VM to the one on the destination node
+# before the
+# migration is complete, therefore ensuring an upper bound on the
+# memory that
+# needs to be transferred. Post-copy requires libvirt>=1.3.3 and
+# QEMU>=2.5.0.
+#
+# When permitted, post-copy mode will be automatically activated if a
+# live-migration memory copy iteration does not make a percentage
+# increase of at least 10% over the last iteration.
+#
+# The live-migration force complete API also uses post-copy when
+# permitted. If
+# post-copy mode is not available, force complete falls back to
+# pausing the VM
+# to ensure the live-migration operation will complete.
+#
+# When using post-copy mode, if the source and destination hosts lose
+# network connectivity, the VM being live-migrated will need to be
+# rebooted.
+# For more
+# details, please see the Administration guide.
+#
+# Related options:
+#
+# * live_migration_permit_auto_converge
+# (boolean value)
+#live_migration_permit_post_copy = false
+
+#
+# This option allows nova to start live migration with auto converge
+# on.
+#
+# Auto converge throttles down CPU if a progress of on-going live
+# migration
+# is slow. Auto converge will only be used if this flag is set to True
+# and
+# post copy is not permitted or post copy is unavailable due to the
+# version
+# of libvirt and QEMU in use.
+#
+# Related options:
+#
+# * live_migration_permit_post_copy
+# (boolean value)
+#live_migration_permit_auto_converge = false
+{%- if controller.get('libvirt', {}).live_migration_permit_auto_converge is defined %}
+live_migration_permit_auto_converge={{ controller.libvirt.live_migration_permit_auto_converge|lower }}
+{%- endif %}
+
+#
+# Determine the snapshot image format when sending to the image
+# service.
+#
+# If set, this decides what format is used when sending the snapshot
+# to the
+# image service.
+# If not set, defaults to same type as source image.
+#
+# Possible values:
+#
+# * ``raw``: RAW disk format
+# * ``qcow2``: KVM default disk format
+# * ``vmdk``: VMWare default disk format
+# * ``vdi``: VirtualBox default disk format
+# * If not set, defaults to same type as source image.
+# (string value)
+# Possible values:
+# raw - <No description provided>
+# qcow2 - <No description provided>
+# vmdk - <No description provided>
+# vdi - <No description provided>
+#snapshot_image_format = <None>
+
+#
+# Override the default disk prefix for the devices attached to an
+# instance.
+#
+# If set, this is used to identify a free disk device name for a bus.
+#
+# Possible values:
+#
+# * Any prefix which will result in a valid disk device name like
+#   'sda' or 'hda', for example. This is only necessary if the device
+#   names differ from the commonly known device name prefixes for a
+#   virtualization type such as: sd, xvd, uvd, vd.
+#
+# Related options:
+#
+# * ``virt_type``: Influences which device type is used, which
+# determines
+# the default disk prefix.
+# (string value)
+#disk_prefix = <None>
+
+# Number of seconds to wait for instance to shut down after soft
+# reboot request is made. We fall back to hard reboot if the instance
+# does not shut down within this window. (integer value)
+#wait_soft_reboot_seconds = 120
+
+#
+# Is used to set the CPU mode an instance should have.
+#
+# If virt_type="kvm|qemu", it will default to "host-model", otherwise
+# it will
+# default to "none".
+#
+# Possible values:
+#
+# * ``host-model``: Clones the host CPU feature flags
+# * ``host-passthrough``: Use the host CPU model exactly
+# * ``custom``: Use a named CPU model
+# * ``none``: Don't set a specific CPU model. For instances with
+# ``virt_type`` as KVM/QEMU, the default CPU model from QEMU will be
+# used,
+# which provides a basic set of CPU features that are compatible with
+# most
+# hosts.
+#
+# Related options:
+#
+# * ``cpu_model``: This should be set ONLY when ``cpu_mode`` is set to
+# ``custom``. Otherwise, it would result in an error and the instance
+# launch will fail.
+#
+# (string value)
+# Possible values:
+# host-model - <No description provided>
+# host-passthrough - <No description provided>
+# custom - <No description provided>
+# none - <No description provided>
+cpu_mode = host-passthrough
+
+#
+# Set the name of the libvirt CPU model the instance should use.
+#
+# Possible values:
+#
+# * The named CPU models listed in ``/usr/share/libvirt/cpu_map.xml``
+#
+# Related options:
+#
+# * ``cpu_mode``: This should be set to ``custom`` ONLY when you want
+# to
+# configure (via ``cpu_model``) a specific named CPU model.
+# Otherwise, it
+# would result in an error and the instance launch will fail.
+#
+# * ``virt_type``: Only the virtualization types ``kvm`` and ``qemu``
+# use this.
+# (string value)
+#cpu_model = <None>
+
+#
+# This allows specifying granular CPU feature flags when specifying
+# CPU
+# models. For example, to explicitly specify the ``pcid``
+# (Process-Context ID, an Intel processor feature) flag to the
+# "IvyBridge"
+# virtual CPU model::
+#
+# [libvirt]
+# cpu_mode = custom
+# cpu_model = IvyBridge
+# cpu_model_extra_flags = pcid
+#
+# Currently, the choice is restricted to only one option: ``pcid``
+# (the
+# option is case-insensitive, so ``PCID`` is also valid). This flag
+# is
+# now required to address the guest performance degradation as a
+# result of
+# applying the "Meltdown" CVE fixes on certain Intel CPU models.
+#
+# Note that when using this config attribute to set the 'PCID' CPU
+# flag,
+# not all virtual (i.e. libvirt / QEMU) CPU models need it:
+#
+# * The only virtual CPU models that include the 'PCID' capability are
+# Intel "Haswell", "Broadwell", and "Skylake" variants.
+#
+# * The libvirt / QEMU CPU models "Nehalem", "Westmere",
+# "SandyBridge",
+# and "IvyBridge" will _not_ expose the 'PCID' capability by
+# default,
+# even if the host CPUs by the same name include it. I.e. 'PCID'
+# needs
+# to be explicitly specified when using the said virtual CPU models.
+#
+# For now, the ``cpu_model_extra_flags`` config attribute is valid
+# only in
+# combination with ``cpu_mode`` + ``cpu_model`` options.
+#
+# Besides ``custom``, the libvirt driver has two other CPU modes: The
+# default, ``host-model``, tells it to do the right thing with respect
+# to
+# handling 'PCID' CPU flag for the guest -- *assuming* you are running
+# updated processor microcode, host and guest kernel, libvirt, and
+# QEMU.
+# The other mode, ``host-passthrough``, checks if 'PCID' is available
+# in
+# the hardware, and if so directly passes it through to the Nova
+# guests.
+# Thus, in context of 'PCID', with either of these CPU modes
+# (``host-model`` or ``host-passthrough``), there is no need to use
+# the
+# ``cpu_model_extra_flags``.
+#
+# Related options:
+#
+# * cpu_mode
+# * cpu_model
+# (list value)
+#cpu_model_extra_flags =
+
+# Location where libvirt driver will store snapshots before uploading
+# them to image service (string value)
+#snapshots_directory = $instances_path/snapshots
+
+# Location where the Xen hvmloader is kept (string value)
+#xen_hvmloader_path = /usr/lib/xen/boot/hvmloader
+
+#
+# Specific cache modes to use for different disk types.
+#
+# For example: file=directsync,block=none,network=writeback
+#
+# For local or direct-attached storage, it is recommended that you use
+# writethrough (default) mode, as it ensures data integrity and has
+# acceptable
+# I/O performance for applications running in the guest, especially
+# for read
+# operations. However, caching mode none is recommended for remote NFS
+# storage,
+# because direct I/O operations (O_DIRECT) perform better than
+# synchronous I/O
+# operations (with O_SYNC). Caching mode none effectively turns all
+# guest I/O
+# operations into direct I/O operations on the host, which is the NFS
+# client in
+# this environment.
+#
+# Possible cache modes:
+#
+# * default: Same as writethrough.
+# * none: With caching mode set to none, the host page cache is
+# disabled, but the disk write cache is enabled for the guest. In
+# this mode, the write performance in the guest is optimal because
+# write operations bypass the host page cache and go directly to the
+# disk write cache. If the disk write cache is battery-backed, or if
+# the applications or storage stack in the guest transfer data
+# properly (either through fsync operations or file system barriers),
+# then data integrity can be ensured. However, because the host page
+# cache is disabled, the read performance in the guest would not be
+# as good as in the modes where the host page cache is enabled, such
+# as writethrough mode. Shareable disk devices, like for a
+# multi-attachable block storage volume, will have their cache mode
+# set to 'none' regardless of configuration.
+# * writethrough: writethrough mode is the default caching mode. With
+# caching set to writethrough mode, the host page cache is enabled,
+# but the disk write cache is disabled for the guest. Consequently,
+# this caching mode ensures data integrity even if the applications
+# and storage stack in the guest do not transfer data to permanent
+# storage properly (either through fsync operations or file system
+# barriers). Because the host page cache is enabled in this mode, the
+# read performance for applications running in the guest is generally
+# better. However, the write performance might be reduced because the
+# disk write cache is disabled.
+# * writeback: With caching set to writeback mode, both the host page
+# cache and the disk write cache are enabled for the guest. Because
+# of this, the I/O performance for applications running in the guest
+# is good, but the data is not protected in a power failure. As a
+# result, this caching mode is recommended only for temporary data
+# where potential data loss is not a concern.
+# * directsync: Like "writethrough", but it bypasses the host page
+# cache.
+# * unsafe: Caching mode of unsafe ignores cache transfer operations
+# completely. As its name implies, this caching mode should be used
+# only for temporary data where data loss is not a concern. This mode
+# can be useful for speeding up guest installations, but you should
+# switch to another caching mode in production environments.
+# (list value)
+#disk_cachemodes =
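+#
+# A hedged example reusing the syntax above: writethrough for local
+# file-backed disks and none for block devices could be set as:
+#
+# disk_cachemodes = file=writethrough,block=none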
+
+# A path to a device that will be used as source of entropy on the
+# host. Permitted options are: /dev/random or /dev/hwrng (string
+# value)
+#rng_dev_path = <None>
+
+# For qemu or KVM guests, set this option to specify a default machine
+# type per host architecture. You can find a list of supported machine
+# types in your environment by checking the output of the "virsh
+# capabilities" command. The format of the value for this config
+# option is host-arch=machine-type. For example:
+# x86_64=machinetype1,armv7l=machinetype2 (list value)
+#hw_machine_type = <None>
+
+# The data source used to populate the host "serial" UUID exposed
+# to guest in the virtual BIOS. (string value)
+# Possible values:
+# none - <No description provided>
+# os - <No description provided>
+# hardware - <No description provided>
+# auto - <No description provided>
+#sysinfo_serial = auto
+
+# The number of seconds in a memory usage statistics period. A zero
+# or negative value disables memory usage statistics. (integer value)
+#mem_stats_period_seconds = 10
+
+# List of uid targets and ranges. Syntax is guest-uid:host-uid:count.
+# Maximum of 5 allowed. (list value)
+#uid_maps =
+
+# List of guid targets and ranges. Syntax is guest-gid:host-gid:count.
+# Maximum of 5 allowed. (list value)
+#gid_maps =
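+#
+# For example (illustrative values), mapping the guest root user and
+# group to host uid/gid 1000, per the guest:host:count syntax above:
+#
+# uid_maps = 0:1000:1
+# gid_maps = 0:1000:1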
+
+# In a realtime host context, the vCPUs of a guest will run at this
+# scheduling priority. The valid priority range depends on the host
+# kernel (usually 1-99). (integer value)
+#realtime_scheduler_priority = 1
+
+#
+# This is a list of performance events that can be monitored. These
+# events will be passed to the libvirt domain XML when creating new
+# instances. Event statistics data can then be collected from
+# libvirt. The minimum libvirt version is 2.0.0. For more information
+# about `Performance monitoring events`, refer to
+# https://libvirt.org/formatdomain.html#elementsPerf .
+#
+# Possible values:
+# * A string list. For example: ``enabled_perf_events = cmt, mbml,
+# mbmt``
+# The supported events list can be found in
+# https://libvirt.org/html/libvirt-libvirt-domain.html ,
+# which you may need to search key words ``VIR_PERF_PARAM_*``
+# (list value)
+#enabled_perf_events =
+
+#
+# VM Images format.
+#
+# If default is specified, then the use_cow_images flag is used
+# instead of this one.
+#
+# Related options:
+#
+# * virt.use_cow_images
+# * images_volume_group
+# (string value)
+# Possible values:
+# raw - <No description provided>
+# flat - <No description provided>
+# qcow2 - <No description provided>
+# lvm - <No description provided>
+# rbd - <No description provided>
+# ploop - <No description provided>
+# default - <No description provided>
+#images_type = default
+
+#
+# LVM Volume Group that is used for VM images, when you specify
+# images_type=lvm
+#
+# Related options:
+#
+# * images_type
+# (string value)
+#images_volume_group = <None>
+
+#
+# Create sparse logical volumes (with virtualsize) if this flag is set
+# to True.
+# (boolean value)
+#sparse_logical_volumes = false
+
+# The RADOS pool in which rbd volumes are stored (string value)
+#images_rbd_pool = rbd
+
+# Path to the ceph configuration file to use (string value)
+#images_rbd_ceph_conf =
+
+#
+# Discard option for nova managed disks.
+#
+# Requires:
+#
+# * Libvirt >= 1.0.6
+# * Qemu >= 1.5 (raw format)
+# * Qemu >= 1.6 (qcow2 format)
+# (string value)
+# Possible values:
+# ignore - <No description provided>
+# unmap - <No description provided>
+#hw_disk_discard = <None>
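+#
+# As an illustrative sketch (assuming the libvirt and QEMU versions
+# noted above), discard requests can be passed through to the backing
+# storage with:
+#
+# hw_disk_discard = unmap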
+
+# DEPRECATED: Allows image information files to be stored in non-
+# standard locations (string value)
+# This option is deprecated for removal since 14.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Image info files are no longer used by the image cache
+#image_info_filename_pattern = $instances_path/$image_cache_subdirectory_name/%(image)s.info
+
+# Unused resized base images younger than this will not be removed
+# (integer value)
+#remove_unused_resized_minimum_age_seconds = 3600
+
+# DEPRECATED: Write a checksum for files in _base to disk (boolean
+# value)
+# This option is deprecated for removal since 14.0.0.
+# Its value may be silently ignored in the future.
+# Reason: The image cache no longer periodically calculates checksums
+# of stored images. Data integrity can be checked at the block or
+# filesystem level.
+#checksum_base_images = false
+
+# DEPRECATED: How frequently to checksum base images (integer value)
+# This option is deprecated for removal since 14.0.0.
+# Its value may be silently ignored in the future.
+# Reason: The image cache no longer periodically calculates checksums
+# of stored images. Data integrity can be checked at the block or
+# filesystem level.
+#checksum_interval_seconds = 3600
+
+#
+# Method used to wipe ephemeral disks when they are deleted. Only
+# takes effect
+# if LVM is set as backing storage.
+#
+# Possible values:
+#
+# * none - do not wipe deleted volumes
+# * zero - overwrite volumes with zeroes
+# * shred - overwrite volume repeatedly
+#
+# Related options:
+#
+# * images_type - must be set to ``lvm``
+# * volume_clear_size
+# (string value)
+# Possible values:
+# none - <No description provided>
+# zero - <No description provided>
+# shred - <No description provided>
+#volume_clear = zero
+
+#
+# Size of area in MiB, counting from the beginning of the allocated
+# volume, that will be cleared using the method set in the
+# ``volume_clear`` option.
+#
+# Possible values:
+#
+# * 0 - clear whole volume
+# * >0 - clear specified amount of MiB
+#
+# Related options:
+#
+# * images_type - must be set to ``lvm``
+# * volume_clear - must be set and the value must be different than
+# ``none``
+# for this option to have any impact
+# (integer value)
+# Minimum value: 0
+#volume_clear_size = 0
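+#
+# A hedged example (illustrative values): to shred only the first
+# 100 MiB of each deleted LVM-backed disk, one might set:
+#
+# images_type = lvm
+# volume_clear = shred
+# volume_clear_size = 100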
+
+#
+# Enable snapshot compression for ``qcow2`` images.
+#
+# Note: you can set ``snapshot_image_format`` to ``qcow2`` to force
+# all snapshots to be in ``qcow2`` format, independently from their
+# original image type.
+#
+# Related options:
+#
+# * snapshot_image_format
+# (boolean value)
+#snapshot_compression = false
+
+# Use virtio for bridge interfaces with KVM/QEMU (boolean value)
+use_virtio_for_bridges = true
+
+#
+# Use multipath connection of the iSCSI or FC volume
+#
+# Volumes can be connected to libvirt as multipath devices. This
+# provides high availability and fault tolerance.
+# (boolean value)
+# Deprecated group/name - [libvirt]/iscsi_use_multipath
+#volume_use_multipath = false
+
+#
+# Number of times to scan the given storage protocol to find a volume.
+# (integer value)
+# Deprecated group/name - [libvirt]/num_iscsi_scan_tries
+#num_volume_scan_tries = 5
+
+#
+# Number of times to rediscover the AoE target to find a volume.
+#
+# Nova provides support for block storage attaching to hosts via AoE
+# (ATA over Ethernet). This option allows the user to specify the
+# maximum number of retry attempts that can be made to discover the
+# AoE device.
+# (integer value)
+#num_aoe_discover_tries = 3
+
+#
+# The iSCSI transport iface to use to connect to the target in case
+# offload support is desired.
+#
+# The default format is <transport_name>.<hwaddress>, where
+# <transport_name> is one of (be2iscsi, bnx2i, cxgb3i, cxgb4i,
+# qla4xxx, ocs) and <hwaddress> is the MAC address of the interface,
+# which can be generated via the iscsiadm -m iface command. Do not
+# confuse the iscsi_iface parameter provided here with the actual
+# transport name.
+# (string value)
+# Deprecated group/name - [libvirt]/iscsi_transport
+#iscsi_iface = <None>
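+#
+# For example (the transport name and MAC address are illustrative
+# only), following the format described above:
+#
+# iscsi_iface = bnx2i.00:05:b5:d2:a0:c2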
+
+#
+# Number of times to scan iSER target to find volume.
+#
+# iSER is a server network protocol that extends the iSCSI protocol
+# to use Remote Direct Memory Access (RDMA). This option allows the
+# user to specify the maximum number of scan attempts that can be
+# made to find the iSER volume.
+# (integer value)
+#num_iser_scan_tries = 5
+
+#
+# Use multipath connection of the iSER volume.
+#
+# iSER volumes can be connected as multipath devices. This provides
+# high availability and fault tolerance.
+# (boolean value)
+#iser_use_multipath = false
+
+#
+# The RADOS client name for accessing rbd (RADOS Block Devices)
+# volumes.
+#
+# Libvirt will refer to this user when connecting and authenticating
+# with the Ceph RBD server.
+# (string value)
+#rbd_user = <None>
+
+#
+# The libvirt UUID of the secret for the rbd_user volumes.
+# (string value)
+#rbd_secret_uuid = <None>
+
+#
+# Directory where the NFS volume is mounted on the compute node.
+# The default is the 'mnt' directory of the location where nova's
+# Python module is installed.
+#
+# NFS provides shared storage for the OpenStack Block Storage service.
+#
+# Possible values:
+#
+# * A string representing absolute path of mount point.
+# (string value)
+#nfs_mount_point_base = $state_path/mnt
+
+#
+# Mount options passed to the NFS client. See the nfs man page for
+# details.
+#
+# Mount options control the way the filesystem is mounted and how the
+# NFS client behaves when accessing files on this mount point.
+#
+# Possible values:
+#
+# * Any string representing mount options separated by commas.
+# * Example string: vers=3,lookupcache=pos
+# (string value)
+#nfs_mount_options = <None>
+
+#
+# Directory where the Quobyte volume is mounted on the compute node.
+#
+# Nova supports the Quobyte volume driver, which enables storing
+# Block Storage service volumes on a Quobyte storage back end. This
+# option specifies the path of the directory where the Quobyte volume
+# is mounted.
+#
+# Possible values:
+#
+# * A string representing absolute path of mount point.
+# (string value)
+#quobyte_mount_point_base = $state_path/mnt
+
+# Path to a Quobyte Client configuration file. (string value)
+#quobyte_client_cfg = <None>
+
+#
+# Directory where the SMBFS shares are mounted on the compute node.
+# (string value)
+#smbfs_mount_point_base = $state_path/mnt
+
+#
+# Mount options passed to the SMBFS client.
+#
+# Provide SMBFS options as a single string containing all parameters.
+# See mount.cifs man page for details. Note that the libvirt-qemu
+# ``uid``
+# and ``gid`` must be specified.
+# (string value)
+#smbfs_mount_options =
+
+#
+# libvirt's transport method for remote file operations.
+#
+# Because libvirt cannot use RPC to copy files over the network
+# to/from other compute nodes, another method must be used for:
+#
+# * creating a directory on the remote host
+# * creating a file on the remote host
+# * removing a file from the remote host
+# * copying a file to the remote host
+# (string value)
+# Possible values:
+# ssh - <No description provided>
+# rsync - <No description provided>
+#remote_filesystem_transport = ssh
+
+#
+# Directory where the Virtuozzo Storage clusters are mounted on the
+# compute node.
+#
+# This option defines non-standard mountpoint for Vzstorage cluster.
+#
+# Related options:
+#
+# * vzstorage_mount_* group of parameters
+# (string value)
+#vzstorage_mount_point_base = $state_path/mnt
+
+#
+# Mount owner user name.
+#
+# This option defines the owner user of Vzstorage cluster mountpoint.
+#
+# Related options:
+#
+# * vzstorage_mount_* group of parameters
+# (string value)
+#vzstorage_mount_user = stack
+
+#
+# Mount owner group name.
+#
+# This option defines the owner group of Vzstorage cluster mountpoint.
+#
+# Related options:
+#
+# * vzstorage_mount_* group of parameters
+# (string value)
+#vzstorage_mount_group = qemu
+
+#
+# Mount access mode.
+#
+# This option defines the access bits of Vzstorage cluster mountpoint,
+# in the format similar to one of chmod(1) utility, like this: 0770.
+# It consists of one to four digits ranging from 0 to 7, with missing
+# lead digits assumed to be 0's.
+#
+# Related options:
+#
+# * vzstorage_mount_* group of parameters
+# (string value)
+#vzstorage_mount_perms = 0770
+
+#
+# Path to vzstorage client log.
+#
+# This option defines the log of cluster operations,
+# it should include "%(cluster_name)s" template to separate
+# logs from multiple shares.
+#
+# Related options:
+#
+# * vzstorage_mount_opts may include more detailed logging options.
+# (string value)
+#vzstorage_log_path = /var/log/vstorage/%(cluster_name)s/nova.log.gz
+
+#
+# Path to the SSD cache file.
+#
+# You can attach an SSD drive to a client and configure the drive to
+# store a local cache of frequently accessed data. By having a local
+# cache on a client's SSD drive, you can increase the overall cluster
+# performance by up to 10 times or more.
+# WARNING! There are many SSD models which are not server grade and
+# may lose an arbitrary set of data changes on power loss. Such SSDs
+# should not be used in Vstorage and are dangerous as they may lead
+# to data corruption and inconsistencies. Please consult the manual
+# on which SSD models are known to be safe, or verify it using the
+# vstorage-hwflush-check(1) utility.
+#
+# This option defines the path which should include "%(cluster_name)s"
+# template to separate caches from multiple shares.
+#
+# Related options:
+#
+# * vzstorage_mount_opts may include more detailed cache options.
+# (string value)
+#vzstorage_cache_path = <None>
+
+#
+# Extra mount options for pstorage-mount
+#
+# For a full description of them, see
+# https://static.openvz.org/vz-man/man1/pstorage-mount.1.gz.html
+# The format is a Python string representation of an argument list,
+# like: "['-v', '-R', '500']"
+# It should not include -c, -l, -C, -u, -g and -m, as those have
+# explicit vzstorage_* options.
+#
+# Related options:
+#
+# * All other vzstorage_* options
+# (list value)
+#vzstorage_mount_opts =
+
+
+[metrics]
+#
+# Configuration options for metrics
+#
+# Options under this group allow you to adjust how values assigned to
+# metrics are calculated.
+
+#
+# From nova.conf
+#
+
+#
+# When using metrics to weight the suitability of a host, you can
+# use this option to change how the calculated weight influences the
+# weight assigned to a host as follows:
+#
+# * >1.0: increases the effect of the metric on overall weight
+# * 1.0: no change to the calculated weight
+# * >0.0,<1.0: reduces the effect of the metric on overall weight
+# * 0.0: the metric value is ignored, and the value of the
+# 'weight_of_unavailable' option is returned instead
+# * >-1.0,<0.0: the effect is reduced and reversed
+# * -1.0: the effect is reversed
+# * <-1.0: the effect is increased proportionally and reversed
+#
+# This option is only used by the FilterScheduler and its
+# subclasses; if you use a different scheduler, this option has no
+# effect.
+#
+# Possible values:
+#
+# * An integer or float value, where the value corresponds to the
+# multiplier ratio for this weigher.
+#
+# Related options:
+#
+# * weight_of_unavailable
+# (floating point value)
+#weight_multiplier = 1.0
+
+#
+# This setting specifies the metrics to be weighed and the relative
+# ratios for each metric. This should be a single string value,
+# consisting of a series of one or more 'name=ratio' pairs, separated
+# by commas, where 'name' is the name of the metric to be weighed,
+# and 'ratio' is the relative weight for that metric.
+#
+# Note that if the ratio is set to 0, the metric value is ignored,
+# and instead the weight will be set to the value of the
+# 'weight_of_unavailable' option.
+#
+# As an example, let's consider the case where this option is set to:
+#
+# ``name1=1.0, name2=-1.3``
+#
+# The final weight will be:
+#
+# ``(name1.value * 1.0) + (name2.value * -1.3)``
+#
+# This option is only used by the FilterScheduler and its
+# subclasses; if you use a different scheduler, this option has no
+# effect.
+#
+# Possible values:
+#
+# * A list of zero or more key/value pairs separated by commas,
+# where the key is a string representing the name of a metric and
+# the value is a numeric weight for that metric. If any value is set
+# to 0, the value is ignored and the weight will be set to the value
+# of the 'weight_of_unavailable' option.
+#
+# Related options:
+#
+# * weight_of_unavailable
+# (list value)
+#weight_setting =
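+#
+# Written as a configuration value, the worked example above
+# (name1/name2 are placeholder metric names) becomes:
+#
+# weight_setting = name1=1.0, name2=-1.3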
+
+#
+# This setting determines how any unavailable metrics are treated. If
+# this option is set to True, any hosts for which a metric is
+# unavailable will raise an exception, so it is recommended to also
+# use the MetricFilter to filter out those hosts before weighing.
+#
+# This option is only used by the FilterScheduler and its
+# subclasses; if you use a different scheduler, this option has no
+# effect.
+#
+# Possible values:
+#
+# * True or False, where False ensures any metric being unavailable
+# for a host will set the host weight to 'weight_of_unavailable'.
+#
+# Related options:
+#
+# * weight_of_unavailable
+# (boolean value)
+#required = true
+
+#
+# When any of the following conditions are met, this value will be
+# used in place of any actual metric value:
+#
+# * One of the metrics named in 'weight_setting' is not available
+# for a host, and the value of 'required' is False
+# * The ratio specified for a metric in 'weight_setting' is 0
+# * The 'weight_multiplier' option is set to 0
+#
+# This option is only used by the FilterScheduler and its
+# subclasses; if you use a different scheduler, this option has no
+# effect.
+#
+# Possible values:
+#
+# * An integer or float value, where the value corresponds to the
+# multiplier ratio for this weigher.
+#
+# Related options:
+#
+# * weight_setting
+# * required
+# * weight_multiplier
+# (floating point value)
+#weight_of_unavailable = -10000.0
+
+
+[mks]
+#
+# The Nova compute node uses WebMKS, a desktop sharing protocol, to
+# provide instance console access to VMs created by VMware
+# hypervisors.
+#
+# Related options:
+# The following options must be set to provide console access.
+# * mksproxy_base_url
+# * enabled
+
+#
+# From nova.conf
+#
+
+#
+# Location of the MKS web console proxy
+#
+# The URL in the response points to a WebMKS proxy which starts
+# proxying between the client and the corresponding vCenter server
+# where the instance runs. In order to use the web based console
+# access, the WebMKS proxy should be installed and configured.
+#
+# Possible values:
+#
+# * Must be a valid URL of the form: ``http://host:port/`` or
+# ``https://host:port/``
+# (uri value)
+#mksproxy_base_url = http://127.0.0.1:6090/
+
+#
+# Enables graphical console access for virtual machines.
+# (boolean value)
+#enabled = false
+
+
+[neutron]
+#
+# Configuration options for neutron (network connectivity as a
+# service).
+
+#
+# From nova.conf
+#
+
+# DEPRECATED:
+# This option specifies the URL for connecting to Neutron.
+#
+# Possible values:
+#
+# * Any valid URL that points to the Neutron API service is
+# appropriate here. This typically matches the URL returned for the
+# 'network' service type from the Keystone service catalog.
+# (uri value)
+# This option is deprecated for removal since 17.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Endpoint lookup uses the service catalog via common
+# keystoneauth1 Adapter configuration options. In the current release,
+# "url" will override this behavior, but will be ignored and/or
+# removed in a future release. To achieve the same result, use the
+# endpoint_override option instead.
+#url = http://127.0.0.1:9696
+
+#
+# Default name for the Open vSwitch integration bridge.
+#
+# Specifies the name of an integration bridge interface used by
+# Open vSwitch. This option is only used if Neutron does not specify
+# the OVS bridge name in port binding responses.
+# (string value)
+#ovs_bridge = br-int
+
+#
+# Default name for the floating IP pool.
+#
+# Specifies the name of the floating IP pool used for allocating
+# floating IPs. This option is only used if Neutron does not specify
+# the floating IP pool name in port binding responses.
+# (string value)
+#default_floating_pool = nova
+
+#
+# Integer value representing the number of seconds to wait before
+# querying Neutron for extensions. After this number of seconds, the
+# next time Nova needs to create a resource in Neutron it will
+# requery Neutron for the extensions that it has loaded. Setting the
+# value to 0 will refresh the extensions with no wait.
+# (integer value)
+# Minimum value: 0
+#extension_sync_interval = 600
+
+#
+# When set to True, this option indicates that Neutron will be used
+# to proxy metadata requests and resolve instance ids. Otherwise,
+# the instance ID must be passed to the metadata request in the
+# 'X-Instance-ID' header.
+#
+# Related options:
+#
+# * metadata_proxy_shared_secret
+# (boolean value)
+#service_metadata_proxy = false
+
+#
+# This option holds the shared secret string used to validate proxy
+# requests to Neutron metadata requests. In order to be used, the
+# 'X-Metadata-Provider-Signature' header must be supplied in the
+# request.
+#
+# Related options:
+#
+# * service_metadata_proxy
+# (string value)
+#metadata_proxy_shared_secret =
+
+# PEM encoded Certificate Authority to use when verifying HTTPs
+# connections. (string value)
+#cafile = <None>
+{%- if controller.network.get('protocol', 'http') == 'https' %}
+cafile={{ controller.network.get('cacert_file', controller.cacert_file) }}
+{%- endif %}
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Timeout value for http requests (integer value)
+#timeout = <None>
+timeout=300
+
+# Authentication type to load (string value)
+# Deprecated group/name - [neutron]/auth_plugin
+#auth_type = <None>
+auth_type = v3password
+
+# Config Section from which to load plugin specific options (string
+# value)
+#auth_section = <None>
+
+# Authentication URL (string value)
+#auth_url = <None>
+auth_url = {{ controller.identity.get('protocol', 'http') }}://{{ controller.identity.host }}:{{ controller.identity.port }}/v3
+
+# Scope for system operations (string value)
+#system_scope = <None>
+
+# Domain ID to scope to (string value)
+#domain_id = <None>
+
+# Domain name to scope to (string value)
+#domain_name = <None>
+
+# Project ID to scope to (string value)
+#project_id = <None>
+
+# Project name to scope to (string value)
+#project_name = <None>
+
+# Domain ID containing project (string value)
+#project_domain_id = <None>
+
+# Domain name containing project (string value)
+#project_domain_name = <None>
+project_domain_name = {{ controller.get('project_domain_name', 'Default') }}
+
+# Trust ID (string value)
+#trust_id = <None>
+
+# Optional domain ID to use with v3 and v2 parameters. It will be used
+# for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_id = <None>
+
+# Optional domain name to use with v3 API and v2 parameters. It will
+# be used for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_name = <None>
+
+# User ID (string value)
+#user_id = <None>
+
+# Username (string value)
+# Deprecated group/name - [neutron]/user_name
+#username = <None>
+
+# User's domain id (string value)
+#user_domain_id = <None>
+
+# User's domain name (string value)
+#user_domain_name = <None>
+user_domain_name = {{ controller.get('user_domain_name', 'Default') }}
+
+# User's password (string value)
+#password = <None>
+
+# Tenant ID (string value)
+#tenant_id = <None>
+
+# Tenant Name (string value)
+#tenant_name = <None>
+
+# The default service_type for endpoint URL discovery. (string value)
+#service_type = network
+
+# The default service_name for endpoint URL discovery. (string value)
+#service_name = <None>
+
+# List of interfaces, in order of preference, for endpoint URL. (list
+# value)
+#valid_interfaces = internal,public
+
+# The default region_name for endpoint URL discovery. (string value)
+#region_name = <None>
+
+# Always use this endpoint URL for requests for this client. NOTE: The
+# unversioned endpoint should be specified here; to request a
+# particular API version, use the `version`, `min-version`, and/or
+# `max-version` options. (string value)
+#endpoint_override = <None>
+{% if pillar.neutron is defined and pillar.neutron.server is defined %}
+password = {{ pillar.neutron.server.identity.password }}
+project_name = {{ pillar.neutron.server.identity.tenant }}
+username = {{ pillar.neutron.server.identity.user }}
+region_name = {{ pillar.neutron.server.identity.region }}
+{%- else %}
+password = {{ controller.network.password }}
+project_name = {{ controller.network.tenant }}
+username = {{ controller.network.user }}
+region_name = {{ controller.network.region }}
+{%- endif %}
+
+
+[notifications]
+#
+# Most of the actions in Nova which manipulate the system state
+# generate notifications which are posted to the messaging component
+# (e.g. RabbitMQ) and can be consumed by any service outside of
+# OpenStack. More technical details at
+# https://docs.openstack.org/nova/latest/reference/notifications.html
+
+#
+# From nova.conf
+#
+
+#
+# If set, send compute.instance.update notifications on
+# instance state changes.
+#
+# Please refer to
+# https://docs.openstack.org/nova/latest/reference/notifications.html
+# for additional information on notifications.
+#
+# Possible values:
+#
+# * None - no notifications
+# * "vm_state" - notifications are sent with VM state transition
+# information in
+# the ``old_state`` and ``state`` fields. The ``old_task_state`` and
+# ``new_task_state`` fields will be set to the current task_state of
+# the
+# instance.
+# * "vm_and_task_state" - notifications are sent with VM and task
+# state
+# transition information.
+# (string value)
+# Possible values:
+# <None> - <No description provided>
+# vm_state - <No description provided>
+# vm_and_task_state - <No description provided>
+#notify_on_state_change = <None>
+{%- if controller.get('notification', {}).notify_on is defined %}
+{%- for key, value in controller.notification.notify_on.items() %}
+notify_on_{{ key }} = {{ value }}
+{%- endfor %}
+{%- endif %}
+
+#
+# If enabled, send api.fault notifications on caught exceptions in the
+# API service.
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/notify_api_faults
+#notify_on_api_faults=false
+notify_on_api_faults=false
+
+# Default notification level for outgoing notifications. (string
+# value)
+# Possible values:
+# DEBUG - <No description provided>
+# INFO - <No description provided>
+# WARN - <No description provided>
+# ERROR - <No description provided>
+# CRITICAL - <No description provided>
+# Deprecated group/name - [DEFAULT]/default_notification_level
+#default_level = INFO
+
+# DEPRECATED:
+# Default publisher_id for outgoing notifications. If you consider
+# routing notifications using a different publisher, change this
+# value accordingly.
+#
+# Possible values:
+#
+# * Defaults to the current hostname of this host, but it can be any
+# valid oslo.messaging publisher_id
+#
+# Related options:
+#
+# * host - Hostname, FQDN or IP address of this host.
+# (string value)
+# This option is deprecated for removal since 17.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# This option is only used when ``monkey_patch=True`` and
+# ``monkey_patch_modules`` is configured to specify the legacy
+# notify_decorator. Since the monkey_patch and monkey_patch_modules
+# options are deprecated, this option is also deprecated.
+#default_publisher_id = $host
+
+#
+# Specifies which notification format shall be used by nova.
+#
+# The default value is fine for most deployments and rarely needs to
+# be changed. This value can be set to 'versioned' once the
+# infrastructure moves closer to consuming the newer format of
+# notifications. After this occurs, this option will be removed.
+#
+# Note that notifications can be completely disabled by setting
+# ``driver=noop``
+# in the ``[oslo_messaging_notifications]`` group.
+#
+# Possible values:
+# * unversioned: Only the legacy unversioned notifications are
+# emitted.
+# * versioned: Only the new versioned notifications are emitted.
+# * both: Both the legacy unversioned and the new versioned
+# notifications are emitted. (Default)
+#
+# The list of versioned notifications is visible in
+# https://docs.openstack.org/nova/latest/reference/notifications.html
+# (string value)
+# Possible values:
+# unversioned - <No description provided>
+# versioned - <No description provided>
+# both - <No description provided>
+#notification_format = both
+
+#
+# Specifies the topics for the versioned notifications issued by nova.
+#
+# The default value is fine for most deployments and rarely needs to
+# be changed.
+# However, if you have a third-party service that consumes versioned
+# notifications, it might be worth getting a topic for that service.
+# Nova will send a message containing a versioned notification
+# payload to each topic queue in this list.
+#
+# The list of versioned notifications is visible in
+# https://docs.openstack.org/nova/latest/reference/notifications.html
+# (list value)
+#versioned_notifications_topics = versioned_notifications
+
+#
+# If enabled, include block device information in the versioned
+# notification payload. Sending block device information is disabled
+# by default as providing that information can incur some overhead
+# on the system since the information may need to be loaded from the
+# database.
+# (boolean value)
+#bdms_in_notifications = false
+
+
+[osapi_v21]
+
+#
+# From nova.conf
+#
+
+# DEPRECATED:
+# This option is a string representing a regular expression (regex)
+# that matches the project_id as contained in URLs. If not set, it
+# will match normal UUIDs created by keystone.
+#
+# Possible values:
+#
+# * A string representing any legal regular expression
+# (string value)
+# This option is deprecated for removal since 13.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# Recent versions of nova constrain project IDs to hexadecimal
+# characters and dashes. If your installation uses IDs outside of
+# this range, you should use this option to provide your own regex
+# and give you time to migrate offending projects to valid IDs
+# before the next release.
+#project_id_regex = <None>
+
+
+[pci]
+
+#
+# From nova.conf
+#
+
+#
+# An alias for a PCI passthrough device requirement.
+#
+# This allows users to specify the alias in the extra specs for a
+# flavor, without needing to repeat all the PCI property
+# requirements.
+#
+# Possible Values:
+#
+# * A list of JSON values which describe the aliases. For example::
+#
+# alias = {
+# "name": "QuickAssist",
+# "product_id": "0443",
+# "vendor_id": "8086",
+# "device_type": "type-PCI",
+# "numa_policy": "required"
+# }
+#
+# This defines an alias for the Intel QuickAssist card. (multi
+# valued). Valid key values are:
+#
+# ``name``
+# Name of the PCI alias.
+#
+# ``product_id``
+# Product ID of the device in hexadecimal.
+#
+# ``vendor_id``
+# Vendor ID of the device in hexadecimal.
+#
+# ``device_type``
+# Type of PCI device. Valid values are: ``type-PCI``, ``type-PF``
+# and ``type-VF``.
+#
+# ``numa_policy``
+# Required NUMA affinity of device. Valid values are: ``legacy``,
+# ``preferred`` and ``required``.
+# (multi valued)
+# Deprecated group/name - [DEFAULT]/pci_alias
+#alias =
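+#
+# As a usage sketch (the flavor name is illustrative), a flavor can
+# then request one such aliased device through its extra specs:
+#
+# openstack flavor set m1.large --property "pci_passthrough:alias"="QuickAssist:1"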
+
+#
+# White list of PCI devices available to VMs.
+#
+# Possible values:
+#
+# * A JSON dictionary which describes a whitelisted PCI device. It
+# should take the following format:
+#
+# ["vendor_id": "<id>",] ["product_id": "<id>",]
+# ["address": "[[[[<domain>]:]<bus>]:][<slot>][.[<function>]]" |
+# "devname": "<name>",]
+# {"<tag>": "<tag_value>",}
+#
+# Where '[' indicates zero or one occurrences, '{' indicates zero or
+# multiple occurrences, and '|' indicates mutually exclusive options.
+# Note that any missing fields are automatically wildcarded.
+#
+# Valid key values are :
+#
+# * "vendor_id": Vendor ID of the device in hexadecimal.
+# * "product_id": Product ID of the device in hexadecimal.
+# * "address": PCI address of the device.
+# * "devname": Device name of the device (for e.g. interface name).
+# Not all
+# PCI devices have a name.
+# * "<tag>": Additional <tag> and <tag_value> used for matching PCI
+# devices.
+# Supported <tag>: "physical_network".
+#
+# The address key supports traditional glob style and regular
+# expression syntax. Valid examples are:
+#
+# passthrough_whitelist = {"devname":"eth0",
+# "physical_network":"physnet"}
+# passthrough_whitelist = {"address":"*:0a:00.*"}
+# passthrough_whitelist = {"address":":0a:00.",
+# "physical_network":"physnet1"}
+# passthrough_whitelist = {"vendor_id":"1137",
+# "product_id":"0071"}
+# passthrough_whitelist = {"vendor_id":"1137",
+# "product_id":"0071",
+# "address": "0000:0a:00.1",
+# "physical_network":"physnet1"}
+# passthrough_whitelist = {"address":{"domain": ".*",
+# "bus": "02", "slot": "01",
+# "function": "[2-7]"},
+# "physical_network":"physnet1"}
+# passthrough_whitelist = {"address":{"domain": ".*",
+# "bus": "02", "slot":
+# "0[1-2]",
+# "function": ".*"},
+# "physical_network":"physnet1"}
+#
+# The following are invalid, as they specify mutually exclusive
+# options:
+#
+# passthrough_whitelist = {"devname":"eth0",
+# "physical_network":"physnet",
+# "address":"*:0a:00.*"}
+#
+# * A JSON list of JSON dictionaries corresponding to the above
+# format. For example:
+#
+# passthrough_whitelist = [{"product_id":"0001",
+# "vendor_id":"8086"},
+# {"product_id":"0002",
+# "vendor_id":"8086"}]
+# (multi valued)
+# Deprecated group/name - [DEFAULT]/pci_passthrough_whitelist
+#passthrough_whitelist =
+{%- if controller.get('sriov', false) %}
+{%- for nic_name, sriov in controller.sriov.items() %}
+passthrough_whitelist = {"devname":"{{ sriov.devname }}","physical_network":"{{ sriov.physical_network }}"}
+{%- endfor %}
+{%- endif %}
+
+[placement]
+
+#
+# From nova.conf
+#
+
+# DEPRECATED:
+# Region name of this node. This is used when picking the URL in the
+# service catalog.
+#
+# Possible values:
+#
+# * Any string representing region name
+# (string value)
+# This option is deprecated for removal since 17.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Endpoint lookup uses the service catalog via common
+# keystoneauth1 Adapter configuration options. Use the region_name
+# option instead.
+os_region_name = {{ controller.identity.region }}
+
+# DEPRECATED:
+# Endpoint interface for this node. This is used when picking the
+# URL in the service catalog.
+# (string value)
+# This option is deprecated for removal since 17.0.0.
+# Its value may be silently ignored in the future.
+# Reason: Endpoint lookup uses the service catalog via common
+# keystoneauth1 Adapter configuration options. Use the
+# valid_interfaces option instead.
+#os_interface = <None>
+
+#
+# If True, when limiting allocation candidate results, the results
+# will be a random sampling of the full result set. If False,
+# allocation candidates are returned in a deterministic but undefined
+# order. That is, all things being equal, two requests for allocation
+# candidates will return the same results in the same order; but no
+# guarantees are made as to how that order is determined.
+# (boolean value)
+#randomize_allocation_candidates = false
+
+# PEM encoded Certificate Authority to use when verifying HTTPs
+# connections. (string value)
+#cafile = <None>
+{%- if controller.identity.get('protocol', 'http') == 'https' %}
+cafile={{ controller.identity.get('cacert_file', controller.cacert_file) }}
+{%- endif %}
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Timeout value for http requests (integer value)
+#timeout = <None>
+
+# Authentication type to load (string value)
+# Deprecated group/name - [placement]/auth_plugin
+auth_type = password
+
+# Config Section from which to load plugin specific options (string
+# value)
+#auth_section = <None>
+
+# Authentication URL (string value)
+#auth_url = <None>
+auth_url={{ controller.identity.get('protocol', 'http') }}://{{ controller.identity.host }}:35357/v3
+
+# Scope for system operations (string value)
+#system_scope = <None>
+
+# Domain ID to scope to (string value)
+#domain_id = <None>
+
+# Domain name to scope to (string value)
+#domain_name = <None>
+
+# Project ID to scope to (string value)
+#project_id = <None>
+
+# Project name to scope to (string value)
+project_name = {{ controller.identity.tenant }}
+
+# Domain ID containing project (string value)
+project_domain_id = {{ controller.identity.get('domain', 'default') }}
+
+# Domain name containing project (string value)
+#project_domain_name = <None>
+
+# Trust ID (string value)
+#trust_id = <None>
+
+# Optional domain ID to use with v3 and v2 parameters. It will be used
+# for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_id = <None>
+
+# Optional domain name to use with v3 API and v2 parameters. It will
+# be used for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_name = <None>
+
+# User ID (string value)
+#user_id = <None>
+
+# Username (string value)
+# Deprecated group/name - [placement]/user_name
+username = {{ controller.identity.user }}
+
+# User's domain id (string value)
+user_domain_id = {{ controller.identity.get('domain', 'default') }}
+
+# User's domain name (string value)
+#user_domain_name = <None>
+
+# User's password (string value)
+password = {{ controller.identity.password }}
+
+# Tenant ID (string value)
+#tenant_id = <None>
+
+# Tenant Name (string value)
+#tenant_name = <None>
+
+# The default service_type for endpoint URL discovery. (string value)
+#service_type = placement
+
+# The default service_name for endpoint URL discovery. (string value)
+#service_name = <None>
+
+# List of interfaces, in order of preference, for endpoint URL. (list
+# value)
+# Deprecated group/name - [placement]/os_interface
+valid_interfaces = internal
+
+# The default region_name for endpoint URL discovery. (string value)
+# Deprecated group/name - [placement]/os_region_name
+#region_name = <None>
+
+# Always use this endpoint URL for requests for this client. NOTE: The
+# unversioned endpoint should be specified here; to request a
+# particular API version, use the `version`, `min-version`, and/or
+# `max-version` options. (string value)
+#endpoint_override = <None>
+
+
+[quota]
+#
+# Quota options allow you to manage quotas in an OpenStack deployment.
+
+#
+# From nova.conf
+#
+
+#
+# The number of instances allowed per project.
+#
+# Possible Values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_instances
+#instances = 10
+
+#
+# The number of instance cores or vCPUs allowed per project.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_cores
+#cores = 20
+
+#
+# The number of megabytes of instance RAM allowed per project.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_ram
+#ram = 51200
+
+# DEPRECATED:
+# The number of floating IPs allowed per project.
+#
+# Floating IPs are not allocated to instances by default. Users need
+# to select them from the pool configured by the OpenStack
+# administrator to attach to their instances.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_floating_ips
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#floating_ips = 10
+
+# DEPRECATED:
+# The number of fixed IPs allowed per project.
+#
+# Unlike floating IPs, fixed IPs are allocated dynamically by the
+# network component when instances boot up. This quota value should
+# be at least the number of instances allowed.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_fixed_ips
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#fixed_ips = -1
+
+#
+# The number of metadata items allowed per instance.
+#
+# Users can associate metadata with an instance during instance
+# creation. This metadata takes the form of key-value pairs.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_metadata_items
+#metadata_items = 128
+
+#
+# The number of injected files allowed.
+#
+# File injection allows users to customize the personality of an
+# instance by injecting data into it upon boot. Only text file
+# injection is permitted: binary or ZIP files are not accepted.
+# During file injection, any existing files that match specified
+# files are renamed to include a ``.bak`` extension appended with a
+# timestamp.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_injected_files
+#injected_files = 5
+
+#
+# The number of bytes allowed per injected file.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_injected_file_content_bytes
+#injected_file_content_bytes = 10240
+
+#
+# The maximum allowed injected file path length.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_injected_file_path_length
+#injected_file_path_length = 255
+
+# DEPRECATED:
+# The number of security groups per project.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_security_groups
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#security_groups = 10
+
+# DEPRECATED:
+# The number of security rules per security group.
+#
+# The associated rules in each security group control the traffic to
+# instances in the group.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_security_group_rules
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# nova-network is deprecated, as are any related configuration
+# options.
+#security_group_rules = 20
+
+#
+# The maximum number of key pairs allowed per user.
+#
+# Users can create at least one key pair for each project and use
+# the key pair for multiple instances that belong to that project.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_key_pairs
+#key_pairs = 100
+
+#
+# The maximum number of server groups per project.
+#
+# Server groups are used to control the affinity and anti-affinity
+# scheduling policy for a group of servers or instances. Reducing
+# the quota will not affect any existing group, but new servers will
+# not be allowed into groups that have become over quota.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_server_groups
+#server_groups = 10
+
+#
+# The maximum number of servers per server group.
+#
+# Possible values:
+#
+# * A positive integer or 0.
+# * -1 to disable the quota.
+# (integer value)
+# Minimum value: -1
+# Deprecated group/name - [DEFAULT]/quota_server_group_members
+#server_group_members = 10
+
+#
+# The number of seconds until a reservation expires.
+#
+# This quota represents the time period for invalidating quota
+# reservations.
+# (integer value)
+#reservation_expire = 86400
+
+#
+# The count of reservations until usage is refreshed.
+#
+# This defaults to 0 (off) to avoid additional load, but it is
+# useful to turn on to help keep quota usage up-to-date and reduce
+# the impact of out of sync usage issues.
+# (integer value)
+# Minimum value: 0
+#until_refresh = 0
+
+#
+# The number of seconds between subsequent usage refreshes.
+#
+# This defaults to 0 (off) to avoid additional load, but it is
+# useful to turn on to help keep quota usage up-to-date and reduce
+# the impact of out of sync usage issues. Note that quotas are not
+# updated on a periodic task; they will update on a new reservation
+# if max_age has passed since the last reservation.
+# (integer value)
+# Minimum value: 0
+#max_age = 0
+
+# DEPRECATED:
+# The quota enforcer driver.
+#
+# Provides abstraction for quota checks. Users can configure a
+# specific driver to use for quota checks.
+#
+# Possible values:
+#
+# * nova.quota.DbQuotaDriver (default) or any string representing a
+# fully qualified class name.
+# (string value)
+# Deprecated group/name - [DEFAULT]/quota_driver
+# This option is deprecated for removal since 14.0.0.
+# Its value may be silently ignored in the future.
+#driver = nova.quota.DbQuotaDriver
+
+#
+# Recheck quota after resource creation to prevent allowing quota to
+# be exceeded.
+#
+# This defaults to True (recheck quota after resource creation) but
+# can be set to False to avoid additional load if allowing quota to
+# be exceeded because of racing requests is considered acceptable.
+# For example, when set to False, if a user makes highly parallel
+# REST API requests to create servers, it will be possible for them
+# to create more servers than their allowed quota during the race. If
+# their quota is 10 servers, they might be able to create 50 during
+# the burst. After the burst, they will not be able to create any
+# more servers but they will be able to keep their 50 servers until
+# they delete them.
+#
+# The initial quota check is done before resources are created, so if
+# multiple parallel requests arrive at the same time, all could pass
+# the quota check and create resources, potentially exceeding quota.
+# When recheck_quota is True, quota will be checked a second time
+# after resources have been created and if the resource is over
+# quota, it will be deleted and OverQuota will be raised, usually
+# resulting in a 403 response to the REST API user. This makes it
+# impossible for a user to exceed their quota with the caveat that it
+# will, however, be possible for a REST API user to be rejected with
+# a 403 response in the event of a collision close to reaching their
+# quota limit, even if the user has enough quota available when they
+# made the request.
+# (boolean value)
+#recheck_quota = true
+
+
+[rdp]
+#
+# Options under this group enable and configure Remote Desktop
+# Protocol (RDP) related features.
+#
+# This group is only relevant to Hyper-V users.
+
+#
+# From nova.conf
+#
+
+#
+# Enable Remote Desktop Protocol (RDP) related features.
+#
+# Hyper-V, unlike the majority of the hypervisors employed on Nova
+# compute nodes, uses RDP instead of VNC and SPICE as a desktop
+# sharing protocol to provide instance console access. This option
+# enables RDP for graphical console access for virtual machines
+# created by Hyper-V.
+#
+# **Note:** RDP should only be enabled on compute nodes that support
+# the Hyper-V virtualization platform.
+#
+# Related options:
+#
+# * ``compute_driver``: Must be hyperv.
+#
+# (boolean value)
+#enabled = false
+
+#
+# The URL an end user would use to connect to the RDP HTML5 console
+# proxy.
+# The console proxy service is called with this token-embedded URL and
+# establishes the connection to the proper instance.
+#
+# An RDP HTML5 console proxy service will need to be configured to
+# listen on the address configured here. Typically the console proxy
+# service would be run on a controller node. The localhost address
+# used as default would only work in a single node environment, i.e.
+# devstack.
+#
+# An RDP HTML5 proxy allows a user to access via the web the text or
+# graphical console of any Windows server or workstation using RDP.
+# RDP HTML5 console proxy services include FreeRDP and wsgate.
+# See https://github.com/FreeRDP/FreeRDP-WebConnect
+#
+# Possible values:
+#
+# * <scheme>://<ip-address>:<port-number>/
+#
+# The scheme must be identical to the scheme configured for the RDP
+# HTML5 console proxy service. It is ``http`` or ``https``.
+#
+# The IP address must be identical to the address on which the RDP
+# HTML5 console proxy service is listening.
+#
+# The port must be identical to the port on which the RDP HTML5
+# console proxy service is listening.
+#
+# Related options:
+#
+# * ``rdp.enabled``: Must be set to ``True`` for
+# ``html5_proxy_base_url`` to be effective.
+# (uri value)
+#html5_proxy_base_url = http://127.0.0.1:6083/
+
+
+[remote_debug]
+
+#
+# From nova.conf
+#
+
+#
+# Debug host (IP or name) to connect to. This command line parameter
+# is used when you want to connect to a nova service via a debugger
+# running on a different host.
+#
+# Note that using the remote debug option changes how Nova uses the
+# eventlet library to support async IO. This could result in
+# failures that do not occur under normal operation. Use at your own
+# risk.
+#
+# Possible Values:
+#
+# * IP address of a remote host as a command line parameter
+# to a nova service. For Example:
+#
+# /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf
+# --remote_debug-host <IP address where the debugger is running>
+# (unknown value)
+#host = <None>
+
+#
+# Debug port to connect to. This command line parameter allows you
+# to specify the port you want to use to connect to a nova service
+# via a debugger running on a different host.
+#
+# Note that using the remote debug option changes how Nova uses the
+# eventlet library to support async IO. This could result in
+# failures that do not occur under normal operation. Use at your own
+# risk.
+#
+# Possible Values:
+#
+# * Port number you want to use as a command line parameter
+# to a nova service. For Example:
+#
+# /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf
+# --remote_debug-host <IP address where the debugger is running>
+# --remote_debug-port <port the debugger is listening on>.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#port = <None>
+
+
+[scheduler]
+
+#
+# From nova.conf
+#
+
+#
+# The scheduler host manager to use.
+#
+# The host manager manages the in-memory picture of the hosts that
+# the scheduler uses. The option's values are chosen from the entry
+# points under the namespace 'nova.scheduler.host_manager' in
+# 'setup.cfg'.
+#
+# NOTE: The "ironic_host_manager" option is deprecated as of the
+# 17.0.0 Queens release.
+# (string value)
+# Possible values:
+# host_manager - <No description provided>
+# ironic_host_manager - <No description provided>
+# Deprecated group/name - [DEFAULT]/scheduler_host_manager
+#host_manager = host_manager
+
+#
+# The class of the driver used by the scheduler. This should be
+# chosen from one of the entrypoints under the namespace
+# 'nova.scheduler.driver' of file 'setup.cfg'. If nothing is
+# specified in this option, the 'filter_scheduler' is used.
+#
+# Other options are:
+#
+# * 'caching_scheduler' which aggressively caches the system state
+# for better individual scheduler performance at the risk of more
+# retries when running multiple schedulers. [DEPRECATED]
+# * 'chance_scheduler' which simply picks a host at random.
+# [DEPRECATED]
+# * 'fake_scheduler' which is used for testing.
+#
+# Possible values:
+#
+# * Any of the drivers included in Nova:
+# ** filter_scheduler
+# ** caching_scheduler
+# ** chance_scheduler
+# ** fake_scheduler
+# * You may also set this to the entry point name of a custom
+# scheduler driver, but you will be responsible for creating and
+# maintaining it in your setup.cfg file.
+# (string value)
+# Deprecated group/name - [DEFAULT]/scheduler_driver
+#driver = filter_scheduler
+
+#
+# Periodic task interval.
+#
+# This value controls how often (in seconds) to run periodic tasks
+# in the scheduler. The specific tasks that are run for each period
+# are determined by the particular scheduler being used.
+#
+# If this is larger than the nova-service 'service_down_time'
+# setting, Nova may report the scheduler service as down. This is
+# because the scheduler driver is responsible for sending a heartbeat
+# and it will only do that as often as this option allows. As each
+# scheduler can work a little differently than the others, be sure to
+# test this with your selected scheduler.
+#
+# Possible values:
+#
+# * An integer, where the integer corresponds to the periodic task
+# interval in seconds. 0 uses the default interval (60 seconds). A
+# negative value disables periodic tasks.
+#
+# Related options:
+#
+# * ``nova-service service_down_time``
+# (integer value)
+# Deprecated group/name - [DEFAULT]/scheduler_driver_task_period
+#periodic_task_interval = 60
+
+#
+# This is the maximum number of attempts that will be made for a
+# given instance build/move operation. It limits the number of
+# alternate hosts returned by the scheduler. When that list of hosts
+# is exhausted, a MaxRetriesExceeded exception is raised and the
+# instance is set to an error state.
+#
+# Possible values:
+#
+# * A positive integer, where the integer corresponds to the max
+# number of attempts that can be made when building or moving an
+# instance.
+# (integer value)
+# Minimum value: 1
+# Deprecated group/name - [DEFAULT]/scheduler_max_attempts
+#max_attempts = 3
+
+#
+# Periodic task interval.
+#
+# This value controls how often (in seconds) the scheduler should
+# attempt to discover new hosts that have been added to cells. If
+# negative (the default), no automatic discovery will occur.
+#
+# Deployments where compute nodes come and go frequently may want
+# this enabled, while others may prefer to manually discover hosts
+# when one is added to avoid any overhead from constantly checking.
+# If enabled, every time this runs, we will select any unmapped
+# hosts out of each cell database on every run.
+# (integer value)
+# Minimum value: -1
+#discover_hosts_in_cells_interval = -1
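+#
+# For example (the interval is illustrative), to discover newly
+# added hosts every five minutes:
+#
+# discover_hosts_in_cells_interval = 300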
+
+#
+# This setting determines the maximum limit on results received from
+# the placement service during a scheduling operation. It effectively
+# limits the number of hosts that may be considered for scheduling
+# requests that match a large number of candidates.
+#
+# A value of 1 (the minimum) will effectively defer scheduling to the
+# placement service strictly on "will it fit" grounds. A higher value
+# will put an upper cap on the number of results the scheduler will
+# consider during the filtering and weighing process. Large
+# deployments may need to set this lower than the total number of
+# hosts available to limit memory consumption, network traffic, etc.
+# of the scheduler.
+#
+# This option is only used by the FilterScheduler; if you use a
+# different
+# scheduler, this option has no effect.
+# (integer value)
+# Minimum value: 1
+#max_placement_results = 1000
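+
+# Example (illustrative values, not part of the upstream sample): a
+# large deployment tuning the options above might set:
+#
+#   driver = filter_scheduler
+#   max_attempts = 5
+#   max_placement_results = 500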
+
+
+[serial_console]
+#
+# The serial console feature allows you to connect to a guest in case
+# a
+# graphical console like VNC, RDP or SPICE is not available. This is
+# currently only supported for the libvirt, Ironic and Hyper-V drivers.
+
+#
+# From nova.conf
+#
+
+#
+# Enable the serial console feature.
+#
+# In order to use this feature, the service ``nova-serialproxy`` needs
+# to run.
+# This service is typically executed on the controller node.
+# (boolean value)
+#enabled = false
+
+#
+# A range of TCP ports a guest can use for its backend.
+#
+# Each instance which gets created will use one port out of this
+# range. If the
+# range is not big enough to provide another port for a new instance,
+# this
+# instance won't get launched.
+#
+# Possible values:
+#
+# * Each string which passes the regex ``\d+:\d+``. For example
+# ``10000:20000``.
+# Be sure that the first port number is lower than the second port
+# number
+# and that both are in range from 0 to 65535.
+# (string value)
+#port_range = 10000:20000
+
+#
+# The URL an end user would use to connect to the ``nova-serialproxy``
+# service.
+#
+# The ``nova-serialproxy`` service is called with this token-enriched
+# URL and establishes the connection to the proper instance.
+#
+# Related options:
+#
+# * The IP address must be identical to the address to which the
+# ``nova-serialproxy`` service is listening (see option
+# ``serialproxy_host``
+# in this section).
+# * The port must be the same as in the option ``serialproxy_port`` of
+# this
+# section.
+# * If you choose to use a secured websocket connection, then start
+# this option
+# with ``wss://`` instead of the unsecured ``ws://``. The options
+# ``cert``
+# and ``key`` in the ``[DEFAULT]`` section have to be set for that.
+# (uri value)
+#base_url = ws://127.0.0.1:6083/
+
+#
+# The IP address to which proxy clients (like ``nova-serialproxy``)
+# should
+# connect to get the serial console of an instance.
+#
+# This is typically the IP address of the host of a ``nova-compute``
+# service.
+# (string value)
+#proxyclient_address = 127.0.0.1
+
+#
+# The IP address which is used by the ``nova-serialproxy`` service to
+# listen
+# for incoming requests.
+#
+# The ``nova-serialproxy`` service listens on this IP address for
+# incoming
+# connection requests to instances which expose a serial console.
+#
+# Related options:
+#
+# * Ensure that this is the same IP address which is defined in the
+# option
+# ``base_url`` of this section or use ``0.0.0.0`` to listen on all
+# addresses.
+# (string value)
+#serialproxy_host = 0.0.0.0
+
+#
+# The port number which is used by the ``nova-serialproxy`` service to
+# listen
+# for incoming requests.
+#
+# The ``nova-serialproxy`` service listens on this port number for
+# incoming
+# connection requests to instances which expose a serial console.
+#
+# Related options:
+#
+# * Ensure that this is the same port number which is defined in the
+# option
+# ``base_url`` of this section.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#serialproxy_port = 6083
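+
+# Example (illustrative values only): with a controller at 10.0.0.10
+# and a compute node at 10.0.0.11, enabling the serial console would
+# combine the options above roughly as:
+#
+#   enabled = true
+#   port_range = 10000:20000
+#   base_url = ws://10.0.0.10:6083/
+#   proxyclient_address = 10.0.0.11
+#   serialproxy_host = 10.0.0.10
+#   serialproxy_port = 6083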
+
+
+[service_user]
+#
+# Configuration options for service to service authentication using a
+# service
+# token. These options allow sending a service token along with the
+# user's token
+# when contacting external REST APIs.
+
+#
+# From nova.conf
+#
+
+#
+# When True, if sending a user token to a REST API, also send a
+# service token.
+#
+# Nova often reuses the user token provided to the nova-api to talk to
+# other REST
+# APIs, such as Cinder, Glance and Neutron. It is possible that while
+# the user
+# token was valid when the request was made to Nova, the token may
+# expire before
+# it reaches the other service. To avoid any failures, and to make it
+# clear it is
+# Nova calling the service on the user's behalf, we include a service
+# token along
+# with the user token. Should the user's token have expired, a valid
+# service
+# token ensures the REST API request will still be accepted by the
+# keystone
+# middleware.
+# (boolean value)
+#send_service_user_token = false
+
+# PEM encoded Certificate Authority to use when verifying HTTPs
+# connections. (string value)
+#cafile = <None>
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Timeout value for http requests (integer value)
+#timeout = <None>
+
+# Authentication type to load (string value)
+# Deprecated group/name - [service_user]/auth_plugin
+#auth_type = <None>
+
+# Config Section from which to load plugin specific options (string
+# value)
+#auth_section = <None>
+
+# Authentication URL (string value)
+#auth_url = <None>
+
+# Scope for system operations (string value)
+#system_scope = <None>
+
+# Domain ID to scope to (string value)
+#domain_id = <None>
+
+# Domain name to scope to (string value)
+#domain_name = <None>
+
+# Project ID to scope to (string value)
+#project_id = <None>
+
+# Project name to scope to (string value)
+#project_name = <None>
+
+# Domain ID containing project (string value)
+#project_domain_id = <None>
+
+# Domain name containing project (string value)
+#project_domain_name = <None>
+
+# Trust ID (string value)
+#trust_id = <None>
+
+# Optional domain ID to use with v3 and v2 parameters. It will be used
+# for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_id = <None>
+
+# Optional domain name to use with v3 API and v2 parameters. It will
+# be used for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_name = <None>
+
+# User ID (string value)
+#user_id = <None>
+
+# Username (string value)
+# Deprecated group/name - [service_user]/user_name
+#username = <None>
+
+# User's domain id (string value)
+#user_domain_id = <None>
+
+# User's domain name (string value)
+#user_domain_name = <None>
+
+# User's password (string value)
+#password = <None>
+
+# Tenant ID (string value)
+#tenant_id = <None>
+
+# Tenant Name (string value)
+#tenant_name = <None>
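+
+# Example (illustrative values; assumes a 'nova' service user exists in
+# Keystone): sending service tokens typically combines the options
+# above as:
+#
+#   send_service_user_token = true
+#   auth_type = password
+#   auth_url = http://keystone:5000/v3
+#   username = nova
+#   password = secret
+#   user_domain_name = Default
+#   project_name = service
+#   project_domain_name = Default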
+
+
+[spice]
+#
+# The SPICE console feature allows you to connect to a guest virtual
+# machine. SPICE is a replacement for the fairly limited VNC protocol.
+#
+# The following requirements must be met in order to use SPICE:
+#
+# * The virtualization driver must be libvirt
+# * spice.enabled set to True
+# * vnc.enabled set to False
+# * update html5proxy_base_url
+# * update server_proxyclient_address
+
+#
+# From nova.conf
+#
+
+#
+# Enable SPICE related features.
+#
+# Related options:
+#
+# * VNC must be explicitly disabled to get access to the SPICE
+# console. Set the
+# enabled option to False in the [vnc] section to disable the VNC
+# console.
+# (boolean value)
+#enabled = false
+enabled = false
+
+#
+# Enable the SPICE guest agent support on the instances.
+#
+# The Spice agent works with the Spice protocol to offer a better
+# guest console
+# experience. However, the Spice console can still be used without the
+# Spice
+# Agent. With the Spice agent installed the following features are
+# enabled:
+#
+# * Copy & Paste of text and images between the guest and client
+# machine
+# * Automatic adjustment of resolution when the client screen changes
+# - e.g.
+# if you make the Spice console full screen the guest resolution
+# will adjust to
+# match it rather than letterboxing.
+# * Better mouse integration - The mouse can be captured and released
+# without
+# needing to click inside the console or press keys to release it.
+# The
+# performance of mouse movement is also improved.
+# (boolean value)
+#agent_enabled = true
+
+#
+# Location of the SPICE HTML5 console proxy.
+#
+# End users use this URL to connect to the ``nova-spicehtml5proxy``
+# service. This service forwards the request to the console of an
+# instance.
+#
+# In order to use the SPICE console, the service ``nova-spicehtml5proxy``
+# should be
+# running. This service is typically launched on the controller node.
+#
+# Possible values:
+#
+# * Must be a valid URL of the form:
+# ``http://host:port/spice_auto.html``
+# where host is the node running ``nova-spicehtml5proxy`` and the
+# port is typically 6082. Consider not using the default value, as
+# it is not well defined for any real deployment.
+#
+# Related options:
+#
+# * This option depends on ``html5proxy_host`` and ``html5proxy_port``
+# options.
+# The access URL returned by the compute node must have the host
+# and port where the ``nova-spicehtml5proxy`` service is listening.
+# (uri value)
+#html5proxy_base_url = http://127.0.0.1:6082/spice_auto.html
+{%- if controller.vncproxy_url is defined %}
+html5proxy_base_url = {{ controller.vncproxy_url }}/spice_auto.html
+{%- endif %}
+
+#
+# The address where the SPICE server running on the instances should
+# listen.
+#
+# Typically, the ``nova-spicehtml5proxy`` proxy client runs on the
+# controller
+# node and connects over the private network to this address on the
+# compute
+# node(s).
+#
+# Possible values:
+#
+# * IP address to listen on.
+# (string value)
+#server_listen = 127.0.0.1
+
+#
+# The address used by ``nova-spicehtml5proxy`` client to connect to
+# instance
+# console.
+#
+# Typically, the ``nova-spicehtml5proxy`` proxy client runs on the
+# controller node and connects over the private network to this
+# address on the
+# compute node(s).
+#
+# Possible values:
+#
+# * Any valid IP address on the compute node.
+#
+# Related options:
+#
+# * This option depends on the ``server_listen`` option.
+# The proxy client must be able to access the address specified in
+# ``server_listen`` using the value of this option.
+# (string value)
+#server_proxyclient_address = 127.0.0.1
+
+#
+# A keyboard layout which is supported by the underlying hypervisor on
+# this
+# node.
+#
+# Possible values:
+# * This is usually an 'IETF language tag' (default is 'en-us'). If
+# you use QEMU as the hypervisor, you can find the list of supported
+# keyboard layouts at /usr/share/qemu/keymaps.
+# (string value)
+#keymap = en-us
+
+#
+# IP address or a hostname on which the ``nova-spicehtml5proxy``
+# service
+# listens for incoming requests.
+#
+# Related options:
+#
+# * This option depends on the ``html5proxy_base_url`` option.
+# The ``nova-spicehtml5proxy`` service must be listening on a host
+# that is
+# accessible from the HTML5 client.
+# (unknown value)
+#html5proxy_host = 0.0.0.0
+
+#
+# Port on which the ``nova-spicehtml5proxy`` service listens for
+# incoming
+# requests.
+#
+# Related options:
+#
+# * This option depends on the ``html5proxy_base_url`` option.
+# The ``nova-spicehtml5proxy`` service must be listening on a port
+# that is
+# accessible from the HTML5 client.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#html5proxy_port = 6082
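+
+# Example (illustrative values): enabling SPICE instead of VNC would
+# combine the options above, together with enabled = false in the
+# [vnc] section, roughly as:
+#
+#   enabled = true
+#   agent_enabled = true
+#   html5proxy_base_url = http://10.0.0.10:6082/spice_auto.html
+#   server_listen = 0.0.0.0
+#   server_proxyclient_address = 10.0.0.11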
+
+
+[upgrade_levels]
+
+{%- if controller.upgrade_levels is defined %}
+{%- for key, value in controller.upgrade_levels.items() %}
+{{ key }}={{ value }}
+{%- endfor %}
+{%- endif %}
+#
+# upgrade_levels options are used to set version cap for RPC
+# messages sent between different nova services.
+#
+# By default all services send messages using the latest version
+# they know about.
+#
+# The compute upgrade level is an important part of rolling upgrades
+# where old and new nova-compute services run side by side.
+#
+# The other options can largely be ignored, and are only kept to
+# help with a possible future backport issue.
+
+#
+# From nova.conf
+#
+
+#
+# Compute RPC API version cap.
+#
+# By default, we always send messages using the most recent version
+# the client knows about.
+#
+# Where you have old and new compute services running, you should set
+# this to the lowest deployed version. This is to guarantee that all
+# services never send messages that one of the compute nodes can't
+# understand. Note that we only support upgrading from release N to
+# release N+1.
+#
+# Set this option to "auto" if you want to let the compute RPC module
+# automatically determine what version to use based on the service
+# versions in the deployment.
+#
+# Possible values:
+#
+# * By default send the latest version the client knows about
+# * 'auto': Automatically determines what version to use based on
+# the service versions in the deployment.
+# * A string representing a version number in the format 'N.N';
+# for example, possible values might be '1.12' or '2.0'.
+# * An OpenStack release name, in lower case, such as 'mitaka' or
+# 'liberty'.
+# (string value)
+#compute = <None>
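+
+# Example (hypothetical rolling upgrade): while Pike computes still run
+# next to Queens control services, the cap could be pinned with:
+#
+#   compute = pike
+#
+# or left to Nova to determine from the deployed service versions:
+#
+#   compute = auto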
+
+# Cells RPC API version cap (string value)
+#cells = <None>
+
+# Intercell RPC API version cap (string value)
+#intercell = <None>
+
+# Cert RPC API version cap (string value)
+#cert = <None>
+
+# Scheduler RPC API version cap (string value)
+#scheduler = <None>
+
+# Conductor RPC API version cap (string value)
+#conductor = <None>
+
+# Console RPC API version cap (string value)
+#console = <None>
+
+# Consoleauth RPC API version cap (string value)
+#consoleauth = <None>
+
+# Network RPC API version cap (string value)
+#network = <None>
+
+# Base API RPC API version cap (string value)
+#baseapi = <None>
+
+
+[vault]
+
+#
+# From nova.conf
+#
+
+# root token for vault (string value)
+#root_token_id = <None>
+
+# Use this endpoint to connect to Vault, for example:
+# "http://127.0.0.1:8200" (string value)
+#vault_url = http://127.0.0.1:8200
+
+# Absolute path to ca cert file (string value)
+#ssl_ca_crt_file = <None>
+
+# SSL Enabled/Disabled (boolean value)
+#use_ssl = false
+
+
+[vendordata_dynamic_auth]
+#
+# Options within this group control the authentication of the
+# vendordata
+# subsystem of the metadata API server (and config drive) with
+# external systems.
+
+#
+# From nova.conf
+#
+
+# PEM encoded Certificate Authority to use when verifying HTTPs
+# connections. (string value)
+#cafile = <None>
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Timeout value for http requests (integer value)
+#timeout = <None>
+
+# Authentication type to load (string value)
+# Deprecated group/name - [vendordata_dynamic_auth]/auth_plugin
+#auth_type = <None>
+
+# Config Section from which to load plugin specific options (string
+# value)
+#auth_section = <None>
+
+# Authentication URL (string value)
+#auth_url = <None>
+
+# Scope for system operations (string value)
+#system_scope = <None>
+
+# Domain ID to scope to (string value)
+#domain_id = <None>
+
+# Domain name to scope to (string value)
+#domain_name = <None>
+
+# Project ID to scope to (string value)
+#project_id = <None>
+
+# Project name to scope to (string value)
+#project_name = <None>
+
+# Domain ID containing project (string value)
+#project_domain_id = <None>
+
+# Domain name containing project (string value)
+#project_domain_name = <None>
+
+# Trust ID (string value)
+#trust_id = <None>
+
+# Optional domain ID to use with v3 and v2 parameters. It will be used
+# for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_id = <None>
+
+# Optional domain name to use with v3 API and v2 parameters. It will
+# be used for both the user and project domain in v3 and ignored in v2
+# authentication. (string value)
+#default_domain_name = <None>
+
+# User ID (string value)
+#user_id = <None>
+
+# Username (string value)
+# Deprecated group/name - [vendordata_dynamic_auth]/user_name
+#username = <None>
+
+# User's domain id (string value)
+#user_domain_id = <None>
+
+# User's domain name (string value)
+#user_domain_name = <None>
+
+# User's password (string value)
+#password = <None>
+
+# Tenant ID (string value)
+#tenant_id = <None>
+
+# Tenant Name (string value)
+#tenant_name = <None>
+
+
+[vmware]
+#
+# Related options:
+# The following options must be set in order to launch VMware-based
+# virtual machines.
+#
+# * compute_driver: Must use vmwareapi.VMwareVCDriver.
+# * vmware.host_username
+# * vmware.host_password
+# * vmware.cluster_name
+
+#
+# From nova.conf
+#
+
+#
+# This option specifies the physical ethernet adapter name for VLAN
+# networking.
+#
+# Set the vlan_interface configuration option to match the ESX host
+# interface that handles VLAN-tagged VM traffic.
+#
+# Possible values:
+#
+# * Any valid string representing VLAN interface name
+# (string value)
+#vlan_interface = vmnic0
+
+#
+# This option should be configured only when using the NSX-MH Neutron
+# plugin. This is the name of the integration bridge on the ESXi
+# server
+# or host. This should not be set for any other Neutron plugin. Hence
+# the default value is not set.
+#
+# Possible values:
+#
+# * Any valid string representing the name of the integration bridge
+# (string value)
+#integration_bridge = <None>
+
+#
+# Set this value if affected by an increased network latency causing
+# repeated characters when typing in a remote console.
+# (integer value)
+# Minimum value: 0
+#console_delay_seconds = <None>
+
+#
+# Identifies the remote system where the serial port traffic will
+# be sent.
+#
+# This option adds a virtual serial port which sends console output to
+# a configurable service URI. At the service URI address there will be
+# a virtual serial port concentrator that will collect console logs.
+# If this is not set, no serial ports will be added to the created
+# VMs.
+#
+# Possible values:
+#
+# * Any valid URI
+# (string value)
+#serial_port_service_uri = <None>
+
+#
+# Identifies a proxy service that provides network access to the
+# serial_port_service_uri.
+#
+# Possible values:
+#
+# * Any valid URI (The scheme is 'telnet' or 'telnets'.)
+#
+# Related options:
+# This option is ignored if serial_port_service_uri is not specified.
+# * serial_port_service_uri
+# (uri value)
+#serial_port_proxy_uri = <None>
+
+#
+# Specifies the directory where the Virtual Serial Port Concentrator
+# is
+# storing console log files. It should match the 'serial_log_dir'
+# config
+# value of VSPC.
+# (string value)
+#serial_log_dir = /opt/vmware/vspc
+
+#
+# Hostname or IP address for connection to VMware vCenter host.
+# (unknown value)
+#host_ip = <None>
+
+# Port for connection to VMware vCenter host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#host_port = 443
+
+# Username for connection to VMware vCenter host. (string value)
+#host_username = <None>
+
+# Password for connection to VMware vCenter host. (string value)
+#host_password = <None>
+
+#
+# Specifies the CA bundle file to be used in verifying the vCenter
+# server certificate.
+# (string value)
+#ca_file = <None>
+
+#
+# If true, the vCenter server certificate is not verified. If false,
+# then the default CA truststore is used for verification.
+#
+# Related options:
+# * ca_file: This option is ignored if "ca_file" is set.
+# (boolean value)
+#insecure = false
+
+# Name of a VMware Cluster ComputeResource. (string value)
+#cluster_name = <None>
+
+#
+# Regular expression pattern to match the name of datastore.
+#
+# The datastore_regex setting specifies the datastores to use with
+# Compute. For example, datastore_regex="nas.*" selects all the data
+# stores that have a name starting with "nas".
+#
+# NOTE: If no regex is given, it just picks the datastore with the
+# most free space.
+#
+# Possible values:
+#
+# * Any matching regular expression to a datastore must be given
+# (string value)
+#datastore_regex = <None>
+
+#
+# Time interval in seconds to poll remote tasks invoked on
+# VMware VC server.
+# (floating point value)
+#task_poll_interval = 0.5
+
+#
+# Number of times VMware vCenter server API must be retried on
+# connection
+# failures, e.g. socket error, etc.
+# (integer value)
+# Minimum value: 0
+#api_retry_count = 10
+
+#
+# This option specifies VNC starting port.
+#
+# Every VM created on an ESX host has an option of enabling a VNC
+# client for remote connection. The 'vnc_port' option above sets the
+# default starting port for the VNC client.
+#
+# Possible values:
+#
+# * Any valid port number within 5900 - (5900 + vnc_port_total)
+#
+# Related options:
+# Below options should be set to enable VNC client.
+# * vnc.enabled = True
+# * vnc_port_total
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#vnc_port = 5900
+
+#
+# Total number of VNC ports.
+# (integer value)
+# Minimum value: 0
+#vnc_port_total = 10000
+
+#
+# This option enables/disables the use of linked clone.
+#
+# The ESX hypervisor requires a copy of the VMDK file in order to boot
+# up a virtual machine. The compute driver must download the VMDK via
+# HTTP from the OpenStack Image service to a datastore that is visible
+# to the hypervisor and cache it. Subsequent virtual machines that
+# need
+# the VMDK use the cached version and don't have to copy the file
+# again
+# from the OpenStack Image service.
+#
+# If set to false, even with a cached VMDK, there is still a copy
+# operation from the cache location to the hypervisor file directory
+# in the shared datastore. If set to true, the above copy operation
+# is avoided, as it creates a copy of the virtual machine that shares
+# virtual disks with its parent VM.
+# (boolean value)
+#use_linked_clone = true
+
+#
+# This option sets the http connection pool size.
+#
+# The connection pool size is the maximum number of connections from
+# nova to
+# vSphere. It should only be increased if there are warnings
+# indicating that
+# the connection pool is full, otherwise, the default should suffice.
+# (integer value)
+# Minimum value: 10
+#connection_pool_size = 10
+
+#
+# This option enables or disables storage policy based placement
+# of instances.
+#
+# Related options:
+#
+# * pbm_default_policy
+# (boolean value)
+#pbm_enabled = false
+
+#
+# This option specifies the PBM service WSDL file location URL.
+#
+# Setting this will disable storage policy based placement
+# of instances.
+#
+# Possible values:
+#
+# * Any valid file path
+# e.g file:///opt/SDK/spbm/wsdl/pbmService.wsdl
+# (string value)
+#pbm_wsdl_location = <None>
+
+#
+# This option specifies the default policy to be used.
+#
+# If pbm_enabled is set and there is no defined storage policy for the
+# specific request, then this policy will be used.
+#
+# Possible values:
+#
+# * Any valid storage policy such as VSAN default storage policy
+#
+# Related options:
+#
+# * pbm_enabled
+# (string value)
+#pbm_default_policy = <None>
+
+#
+# This option specifies the limit on the maximum number of objects to
+# return in a single result.
+#
+# A positive value will cause the operation to suspend the retrieval
+# when the count of objects reaches the specified limit. The server
+# may
+# still limit the count to something less than the configured value.
+# Any remaining objects may be retrieved with additional requests.
+# (integer value)
+# Minimum value: 0
+#maximum_objects = 100
+
+#
+# This option adds a prefix to the folder where cached images are
+# stored.
+#
+# This is not the full path - just a folder prefix. This should only
+# be
+# used when a datastore cache is shared between compute nodes.
+#
+# Note: This should only be used when the compute nodes are running on
+# the same host or when they have a shared file system.
+#
+# Possible values:
+#
+# * Any string representing the cache prefix to the folder
+# (string value)
+#cache_prefix = <None>
+
+
+[vnc]
+#
+# Virtual Network Computing (VNC) can be used to provide remote desktop
+# console access to instances for tenants and/or administrators.
+
+#
+# From nova.conf
+#
+
+#
+# Enable VNC related features.
+#
+# Guests will get created with graphical devices to support this.
+# Clients
+# (for example Horizon) can then establish a VNC connection to the
+# guest.
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/vnc_enabled
+enabled = true
+
+novncproxy_host = {{ controller.bind.get('novncproxy_address', '0.0.0.0') }}
+novncproxy_base_url = {{ controller.vncproxy_url }}/vnc_auto.html
+novncproxy_port = {{ controller.bind.get('vncproxy_port', '6080') }}
+{%- if pillar.nova.compute is defined %}
+vncserver_listen = {{ controller.bind.private_address }}
+vncserver_proxyclient_address = {{ controller.bind.private_address }}
+{%- else %}
+vncserver_listen = {{ controller.bind.get('novncproxy_address', '0.0.0.0') }}
+{%- endif %}
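+
+# Example rendered output (illustrative pillar values:
+# controller.vncproxy_url = http://10.0.0.10:6080,
+# controller.bind.private_address = 10.0.0.11, compute pillar present):
+#
+#   novncproxy_host = 0.0.0.0
+#   novncproxy_base_url = http://10.0.0.10:6080/vnc_auto.html
+#   novncproxy_port = 6080
+#   vncserver_listen = 10.0.0.11
+#   vncserver_proxyclient_address = 10.0.0.11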
+
+#
+# Keymap for VNC.
+#
+# The keyboard mapping (keymap) determines which keyboard layout a VNC
+# session should use by default.
+#
+# Possible values:
+#
+# * A keyboard layout which is supported by the underlying hypervisor
+# on
+# this node. This is usually an 'IETF language tag' (for example
+# 'en-us'). If you use QEMU as hypervisor, you should find the
+# list
+# of supported keyboard layouts at ``/usr/share/qemu/keymaps``.
+# (string value)
+# Deprecated group/name - [DEFAULT]/vnc_keymap
+keymap = {{ controller.get('vnc_keymap', 'en-us') }}
+
+#
+# The IP address or hostname on which an instance should listen for
+# incoming VNC connection requests on this node.
+# (unknown value)
+# Deprecated group/name - [DEFAULT]/vncserver_listen
+# Deprecated group/name - [vnc]/vncserver_listen
+#server_listen = 127.0.0.1
+
+#
+# Private, internal IP address or hostname of VNC console proxy.
+#
+# The VNC proxy is an OpenStack component that enables compute service
+# users to access their instances through VNC clients.
+#
+# This option sets the private address to which proxy clients, such as
+# ``nova-xvpvncproxy``, should connect.
+# (unknown value)
+# Deprecated group/name - [DEFAULT]/vncserver_proxyclient_address
+# Deprecated group/name - [vnc]/vncserver_proxyclient_address
+#server_proxyclient_address = 127.0.0.1
+
+#
+# Public address of noVNC VNC console proxy.
+#
+# The VNC proxy is an OpenStack component that enables compute service
+# users to access their instances through VNC clients. noVNC provides
+# VNC support through a websocket-based client.
+#
+# This option sets the public base URL to which client systems will
+# connect. noVNC clients can use this address to connect to the noVNC
+# instance and, by extension, the VNC sessions.
+#
+# Related options:
+#
+# * novncproxy_host
+# * novncproxy_port
+# (uri value)
+#novncproxy_base_url = http://127.0.0.1:6080/vnc_auto.html
+
+#
+# IP address or hostname that the XVP VNC console proxy should bind
+# to.
+#
+# The VNC proxy is an OpenStack component that enables compute service
+# users to access their instances through VNC clients. Xen provides
+# the Xenserver VNC Proxy, or XVP, as an alternative to the
+# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
+# XVP clients are Java-based.
+#
+# This option sets the private address to which the XVP VNC console
+# proxy
+# service should bind.
+#
+# Related options:
+#
+# * xvpvncproxy_port
+# * xvpvncproxy_base_url
+# (unknown value)
+#xvpvncproxy_host = 0.0.0.0
+
+#
+# Port that the XVP VNC console proxy should bind to.
+#
+# The VNC proxy is an OpenStack component that enables compute service
+# users to access their instances through VNC clients. Xen provides
+# the Xenserver VNC Proxy, or XVP, as an alternative to the
+# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
+# XVP clients are Java-based.
+#
+# This option sets the private port to which the XVP VNC console proxy
+# service should bind.
+#
+# Related options:
+#
+# * xvpvncproxy_host
+# * xvpvncproxy_base_url
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#xvpvncproxy_port = 6081
+
+#
+# Public URL address of XVP VNC console proxy.
+#
+# The VNC proxy is an OpenStack component that enables compute service
+# users to access their instances through VNC clients. Xen provides
+# the Xenserver VNC Proxy, or XVP, as an alternative to the
+# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
+# XVP clients are Java-based.
+#
+# This option sets the public base URL to which client systems will
+# connect. XVP clients can use this address to connect to the XVP
+# instance and, by extension, the VNC sessions.
+#
+# Related options:
+#
+# * xvpvncproxy_host
+# * xvpvncproxy_port
+# (uri value)
+#xvpvncproxy_base_url = http://127.0.0.1:6081/console
+
+#
+# IP address that the noVNC console proxy should bind to.
+#
+# The VNC proxy is an OpenStack component that enables compute service
+# users to access their instances through VNC clients. noVNC provides
+# VNC support through a websocket-based client.
+#
+# This option sets the private address to which the noVNC console
+# proxy
+# service should bind.
+#
+# Related options:
+#
+# * novncproxy_port
+# * novncproxy_base_url
+# (string value)
+#novncproxy_host = 0.0.0.0
+
+#
+# Port that the noVNC console proxy should bind to.
+#
+# The VNC proxy is an OpenStack component that enables compute service
+# users to access their instances through VNC clients. noVNC provides
+# VNC support through a websocket-based client.
+#
+# This option sets the private port to which the noVNC console proxy
+# service should bind.
+#
+# Related options:
+#
+# * novncproxy_host
+# * novncproxy_base_url
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#novncproxy_port = 6080
+
+#
+# The authentication schemes to use with the compute node.
+#
+# Control what RFB authentication schemes are permitted for
+# connections between
+# the proxy and the compute host. If multiple schemes are enabled, the
+# first
+# matching scheme will be used, thus the strongest schemes should be
+# listed
+# first.
+#
+# Possible values:
+#
+# * ``none``: allow connection without authentication
+# * ``vencrypt``: use VeNCrypt authentication scheme
+#
+# Related options:
+#
+# * ``[vnc]vencrypt_client_key``, ``[vnc]vencrypt_client_cert``: must
+# also be set
+# (list value)
+#auth_schemes = none
+
+# The path to the client key PEM file (for x509)
+#
+# The fully qualified path to a PEM file containing the private key
+# which the VNC
+# proxy server presents to the compute node during VNC authentication.
+#
+# Related options:
+#
+# * ``vnc.auth_schemes``: must include ``vencrypt``
+# * ``vnc.vencrypt_client_cert``: must also be set
+# (string value)
+#vencrypt_client_key = <None>
+
+# The path to the client certificate PEM file (for x509)
+#
+# The fully qualified path to a PEM file containing the x509
+# certificate which
+# the VNC proxy server presents to the compute node during VNC
+# authentication.
+#
+# Related options:
+#
+# * ``vnc.auth_schemes``: must include ``vencrypt``
+# * ``vnc.vencrypt_client_key``: must also be set
+# (string value)
+#vencrypt_client_cert = <None>
+
+# The path to the CA certificate PEM file
+#
+# The fully qualified path to a PEM file containing one or more x509
+# certificates
+# for the certificate authorities used by the compute node VNC server.
+#
+# Related options:
+#
+# * ``vnc.auth_schemes``: must include ``vencrypt``
+# (string value)
+#vencrypt_ca_certs = <None>
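+
+# Example (illustrative paths): requiring VeNCrypt between the proxy
+# and the compute hosts, with ``none`` as a fallback, could look like:
+#
+#   auth_schemes = vencrypt,none
+#   vencrypt_client_key = /etc/pki/nova-novncproxy/client-key.pem
+#   vencrypt_client_cert = /etc/pki/nova-novncproxy/client-cert.pem
+#   vencrypt_ca_certs = /etc/pki/nova-novncproxy/ca-cert.pem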
+
+
+[workarounds]
+#
+# A collection of workarounds used to mitigate bugs or issues found in
+# system
+# tools (e.g. Libvirt or QEMU) or Nova itself under certain
+# conditions. These
+# should only be enabled in exceptional circumstances. All options are
+# linked
+# against bug IDs, where more information on the issue can be found.
+
+#
+# From nova.conf
+#
+
+#
+# Use sudo instead of rootwrap.
+#
+# Allow fallback to sudo for performance reasons.
+#
+# For more information, refer to the bug report:
+#
+# https://bugs.launchpad.net/nova/+bug/1415106
+#
+# Possible values:
+#
+# * True: Use sudo instead of rootwrap
+# * False: Use rootwrap as usual
+#
+# Interdependencies to other options:
+#
+# * Any options that affect 'rootwrap' will be ignored.
+# (boolean value)
+#disable_rootwrap = false
+
+#
+# Disable live snapshots when using the libvirt driver.
+#
+# Live snapshots allow the snapshot of the disk to happen without an
+# interruption to the guest, using coordination with a guest agent to
+# quiesce the filesystem.
+#
+# When using libvirt 1.2.2 live snapshots fail intermittently under
+# load
+# (likely related to concurrent libvirt/qemu operations). This config
+# option provides a mechanism to disable live snapshot, in favor of
+# cold
+# snapshot, while this is resolved. Cold snapshot causes an instance
+# outage while the guest is going through the snapshotting process.
+#
+# For more information, refer to the bug report:
+#
+# https://bugs.launchpad.net/nova/+bug/1334398
+#
+# Possible values:
+#
+# * True: Live snapshot is disabled when using libvirt
+# * False: Live snapshots are always used when snapshotting (as long
+# as
+# there is a new enough libvirt and the backend storage supports it)
+# (boolean value)
+#disable_libvirt_livesnapshot = false
+disable_libvirt_livesnapshot = {{ controller.get('workaround', {}).get('disable_libvirt_livesnapshot', True)|lower }}
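+
+# Example rendered output (assuming the pillar key
+# nova:controller:workaround:disable_libvirt_livesnapshot is set to
+# false; when the key is absent, the template above renders true):
+#
+#   disable_libvirt_livesnapshot = false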
+
+#
+# Enable handling of events emitted from compute drivers.
+#
+# Many compute drivers emit lifecycle events, which are events that
+# occur when,
+# for example, an instance is starting or stopping. If the instance is
+# going
+# through task state changes due to an API operation, like resize, the
+# events
+# are ignored.
+#
+# This is an advanced feature which allows the hypervisor to signal to
+# the
+# compute service that an unexpected state change has occurred in an
+# instance
+# and that the instance can be shutdown automatically. Unfortunately,
+# this can
+# race in some conditions, for example in reboot operations or when
+# the compute
+# service or when host is rebooted (planned or due to an outage). If
+# such races
+# are common, then it is advisable to disable this feature.
+#
+# Care should be taken when this feature is disabled and
+# 'sync_power_state_interval' is set to a negative value. In this
+# case, any
+# instances that get out of sync between the hypervisor and the Nova
+# database
+# will have to be synchronized manually.
+#
+# For more information, refer to the bug report:
+#
+# https://bugs.launchpad.net/bugs/1444630
+#
+# Interdependencies to other options:
+#
+# * If ``sync_power_state_interval`` is negative and this feature is
+# disabled,
+# then instances that get out of sync between the hypervisor and the
+# Nova
+# database will have to be synchronized manually.
+# (boolean value)
+#handle_virt_lifecycle_events = true
+
+#
+# Disable the server group policy check upcall in compute.
+#
+# In order to detect races with server group affinity policy, the
+# compute
+# service attempts to validate that the policy was not violated by the
+# scheduler. It does this by making an upcall to the API database to
+# list
+# the instances in the server group for one that it is booting, which
+# violates
+# our api/cell isolation goals. Eventually this will be solved by
+# proper affinity
+# guarantees in the scheduler and placement service, but until then,
+# this late
+# check is needed to ensure proper affinity policy.
+#
+# Operators that desire api/cell isolation over this check should
+# enable this flag, which will avoid making that upcall from compute.
+#
+# Related options:
+#
+# * [filter_scheduler]/track_instance_changes also relies on upcalls
+# from the
+# compute service to the scheduler service.
+# (boolean value)
+#disable_group_policy_check_upcall = false
+
+
+[wsgi]
+#
+# Options under this group are used to configure WSGI (Web Server
+# Gateway
+# Interface). WSGI is used to serve API requests.
+
+#
+# From nova.conf
+#
+
+#
+# This option represents a file name for the paste.deploy config for
+# nova-api.
+#
+# Possible values:
+#
+# * A string representing file name for the paste.deploy config.
+# (string value)
+api_paste_config = /etc/nova/api-paste.ini
+
+# DEPRECATED:
+# It represents a python format string that is used as the template to
+# generate
+# log lines. The following values can be formatted into it: client_ip,
+# date_time, request_line, status_code, body_length, wall_seconds.
+#
+# This option is used for building custom request loglines when
+# running
+# nova-api under eventlet. If used under uwsgi or apache, this option
+# has no effect.
+#
+# Possible values:
+#
+# * '%(client_ip)s "%(request_line)s" status: %(status_code)s'
+# 'len: %(body_length)s time: %(wall_seconds).7f' (default)
+# * Any formatted string formed by specific values.
+# (string value)
+# This option is deprecated for removal since 16.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# This option only works when running nova-api under eventlet, and
+# encodes very eventlet specific pieces of information. Starting in
+# Pike
+# the preferred model for running nova-api is under uwsgi or apache
+# mod_wsgi.
+#wsgi_log_format = %(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f
+
+#
+# This option specifies the HTTP header used to determine the protocol
+# scheme
+# for the original request, even if it was removed by an SSL
+# terminating proxy.
+#
+# Possible values:
+#
+# * None (default) - the request scheme is not influenced by any HTTP
+# headers
+# * Valid HTTP header, like HTTP_X_FORWARDED_PROTO
+#
+# WARNING: Do not set this unless you know what you are doing.
+#
+# Make sure ALL of the following are true before setting this
+# (assuming the
+# values from the example above):
+# * Your API is behind a proxy.
+# * Your proxy strips the X-Forwarded-Proto header from all incoming
+# requests.
+# In other words, if end users include that header in their
+# requests, the proxy
+# will discard it.
+# * Your proxy sets the X-Forwarded-Proto header and sends it to API,
+# but only
+# for requests that originally come in via HTTPS.
+#
+# If any of those are not true, you should keep this setting set to
+# None.
+#
+# (string value)
+#secure_proxy_ssl_header = <None>
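+
+# Example (only if all of the proxy conditions above hold):
+#
+#   secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO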
+
+#
+# This option allows setting path to the CA certificate file that
+# should be used
+# to verify connecting clients.
+#
+# Possible values:
+#
+# * String representing path to the CA certificate file.
+#
+# Related options:
+#
+# * enabled_ssl_apis
+# (string value)
+#ssl_ca_file = <None>
+
+#
+# This option allows setting path to the SSL certificate of API
+# server.
+#
+# Possible values:
+#
+# * String representing path to the SSL certificate.
+#
+# Related options:
+#
+# * enabled_ssl_apis
+# (string value)
+#ssl_cert_file = <None>
+
+#
+# This option specifies the path to the file where SSL private key of
+# API
+# server is stored when SSL is in effect.
+#
+# Possible values:
+#
+# * String representing path to the SSL private key.
+#
+# Related options:
+#
+# * enabled_ssl_apis
+# (string value)
+#ssl_key_file = <None>
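+
+# Example (illustrative paths): serving the API over SSL with client
+# verification combines the three options above:
+#
+#   ssl_ca_file = /etc/nova/ssl/ca.pem
+#   ssl_cert_file = /etc/nova/ssl/cert.pem
+#   ssl_key_file = /etc/nova/ssl/key.pem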
+
+#
+# This option sets the value of TCP_KEEPIDLE in seconds for each
+# server socket.
+# It specifies the duration of time to keep the connection active. TCP
+# generates a KEEPALIVE transmission for an application that requests
+# to keep the connection active. Not supported on OS X.
+#
+# Related options:
+#
+# * keep_alive
+# (integer value)
+# Minimum value: 0
+#tcp_keepidle = 600
+
+#
+# This option specifies the size of the pool of greenthreads used by
+# wsgi.
+# It is possible to limit the number of concurrent connections using
+# this
+# option.
+# (integer value)
+# Minimum value: 0
+# Deprecated group/name - [DEFAULT]/wsgi_default_pool_size
+#default_pool_size = 1000
+
+#
+# This option specifies the maximum line size of message headers to be
+# accepted.
+# max_header_line may need to be increased when using large tokens
+# (typically
+# those generated by the Keystone v3 API with big service catalogs).
+#
+# Since TCP is a stream-based protocol, in order to reuse a
+# connection, HTTP has to have a way to indicate the end of the
+# previous response and the beginning of the next. Hence, in a
+# keep_alive case, all messages must have a self-defined message
+# length.
+# (integer value)
+# Minimum value: 0
+#max_header_line = 16384
+
+#
+# This option allows using the same TCP connection to send and receive
+# multiple
+# HTTP requests/responses, as opposed to opening a new one for every
+# single
+# request/response pair. HTTP keep-alive indicates HTTP connection
+# reuse.
+#
+# Possible values:
+#
+# * True : reuse HTTP connection.
+# * False : closes the client socket connection explicitly.
+#
+# Related options:
+#
+# * tcp_keepidle
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/wsgi_keep_alive
+#keep_alive = true
+
+#
+# This option specifies the timeout for client connections' socket
+# operations.
+# If an incoming connection is idle for this number of seconds it will
+# be
+# closed. It indicates timeout on individual read/writes on the socket
+# connection. To wait forever set to 0.
+# (integer value)
+# Minimum value: 0
+#client_socket_timeout = 900
+
+
+[xenserver]
+#
+# XenServer options are used when the compute_driver is set to use
+# XenServer (compute_driver=xenapi.XenAPIDriver).
+#
+# Must specify connection_url, connection_password and
+# ovs_integration_bridge to
+# use compute_driver=xenapi.XenAPIDriver.
+
+#
+# From nova.conf
+#
+
+#
+# Number of seconds to wait for agent's reply to a request.
+#
+# Nova configures/performs certain administrative actions on a server
+# with the
+# help of an agent that's installed on the server. The communication
+# between
+# Nova and the agent is achieved via sharing messages, called records,
+# over
+# xenstore, a shared storage across all the domains on a Xenserver
+# host.
+# Operations performed by the agent on behalf of nova are: 'version',
+# 'key_init', 'password', 'resetnetwork', 'inject_file', and
+# 'agentupdate'.
+#
+# To perform one of the above operations, the xapi 'agent' plugin
+# writes the
+# command and its associated parameters to a certain location known to
+# the domain
+# and awaits response. On being notified of the message, the agent
+# performs
+# appropriate actions on the server and writes the result back to
+# xenstore. This
+# result is then read by the xapi 'agent' plugin to determine the
+# success/failure
+# of the operation.
+#
+# This config option determines how long the xapi 'agent' plugin shall
+# wait to
+# read the response off of xenstore for a given request/command. If
+# the agent on
+# the instance fails to write the result in this time period, the
+# operation is
+# considered to have timed out.
+#
+# Related options:
+#
+# * ``agent_version_timeout``
+# * ``agent_resetnetwork_timeout``
+#
+# (integer value)
+# Minimum value: 0
+#agent_timeout = 30
+
+#
+# Number of seconds to wait for agent's reply to version request.
+#
+# This indicates the amount of time the xapi 'agent' plugin waits for
+# the agent to
+# respond to the 'version' request specifically. The generic timeout
+# for agent
+# communication ``agent_timeout`` is ignored in this case.
+#
+# During the build process the 'version' request is used to determine
+# if the
+# agent is available/operational to perform other requests such as
+# 'resetnetwork', 'password', 'key_init' and 'inject_file'. If the
+# 'version' call
+# fails, the other configuration is skipped. So, this configuration
+# option can also be interpreted as the time in which the agent is
+# expected to be fully operational.
+# (integer value)
+# Minimum value: 0
+#agent_version_timeout = 300
+
+#
+# Number of seconds to wait for agent's reply to resetnetwork
+# request.
+#
+# This indicates the amount of time the xapi 'agent' plugin waits for
+# the agent to
+# respond to the 'resetnetwork' request specifically. The generic
+# timeout for
+# agent communication ``agent_timeout`` is ignored in this case.
+# (integer value)
+# Minimum value: 0
+#agent_resetnetwork_timeout = 60
+
+#
+# Path to locate guest agent on the server.
+#
+# Specifies the path in which the XenAPI guest agent should be
+# located. If the
+# agent is present, network configuration is not injected into the
+# image.
+#
+# Related options:
+#
+# For this option to have an effect:
+# * ``flat_injected`` should be set to ``True``
+# * ``compute_driver`` should be set to ``xenapi.XenAPIDriver``
+#
+# (string value)
+#agent_path = usr/sbin/xe-update-networking
+
+#
+# Disables the use of XenAPI agent.
+#
+# This configuration option suggests whether the use of agent should
+# be enabled
+# or not regardless of what image properties are present. Image
+# properties have
+# an effect only when this is set to ``True``. Read description of
+# config option
+# ``use_agent_default`` for more information.
+#
+# Related options:
+#
+# * ``use_agent_default``
+#
+# (boolean value)
+#disable_agent = false
+
+#
+# Whether or not to use the agent by default when its usage is enabled
+# but not
+# indicated by the image.
+#
+# The use of XenAPI agent can be disabled altogether using the
+# configuration
+# option ``disable_agent``. However, if it is not disabled, the use of
+# an agent
+# can still be controlled by the image in use through one of its
+# properties,
+# ``xenapi_use_agent``. If this property is either not present or
+# specified
+# incorrectly on the image, the use of agent is determined by this
+# configuration
+# option.
+#
+# Note that if this configuration is set to ``True`` when the agent is
+# not
+# present, the boot times will increase significantly.
+#
+# Related options:
+#
+# * ``disable_agent``
+#
+# (boolean value)
+#use_agent_default = false
+
+# Timeout in seconds for XenAPI login. (integer value)
+# Minimum value: 0
+#login_timeout = 10
+
+#
+# Maximum number of concurrent XenAPI connections.
+#
+# In nova, multiple XenAPI requests can happen at a time.
+# Configuring this option will parallelize access to the XenAPI
+# session, which allows you to make concurrent XenAPI connections.
+# (integer value)
+# Minimum value: 1
+#connection_concurrent = 5
+
+#
+# Cache glance images locally.
+#
+# The value for this option must be chosen from the choices listed
+# here. Configuring a value other than these will default to 'all'.
+#
+# Note: There is nothing that deletes these images.
+#
+# Possible values:
+#
+# * `all`: will cache all images.
+# * `some`: will only cache images that have the
+# image_property `cache_in_nova=True`.
+# * `none`: turns off caching entirely.
+# (string value)
+# Possible values:
+# all - <No description provided>
+# some - <No description provided>
+# none - <No description provided>
+#cache_images = all
+
+#
+# Compression level for images.
+#
+# By setting this option we can configure the gzip compression level.
+# This option sets the GZIP environment variable before spawning tar -cz
+# to force the compression level. It defaults to none, which means the
+# GZIP environment variable is not set and the default (usually -6)
+# is used.
+#
+# Possible values:
+#
+# * Range is 1-9, e.g., 9 for gzip -9, 9 being most
+# compressed but most CPU intensive on dom0.
+# * Any values out of this range will default to None.
+# (integer value)
+# Minimum value: 1
+# Maximum value: 9
+#image_compression_level = <None>
+
+# Default OS type used when uploading an image to glance (string
+# value)
+#default_os_type = linux
+
+# Time in secs to wait for a block device to be created (integer
+# value)
+# Minimum value: 1
+#block_device_creation_timeout = 10
+{%- if controller.block_device_creation_timeout is defined %}
+block_device_creation_timeout = {{ controller.block_device_creation_timeout }}
+{%- endif %}
+
+#
+# Maximum size in bytes of kernel or ramdisk images.
+#
+# Specifying the maximum size of kernel or ramdisk will avoid copying
+# large files to dom0 and filling up /boot/guest.
+# (integer value)
+#max_kernel_ramdisk_size = 16777216
+
+#
+# Filter for finding the SR to be used to install guest instances on.
+#
+# Possible values:
+#
+# * To use the Local Storage in default XenServer/XCP installations
+# set this flag to other-config:i18n-key=local-storage.
+# * To select an SR with a different matching criteria, you could
+# set it to other-config:my_favorite_sr=true.
+# * To fall back on the Default SR, as displayed by XenCenter,
+# set this flag to: default-sr:true.
+# (string value)
+#sr_matching_filter = default-sr:true
+
+#
+# Whether to use sparse_copy for copying data on a resize down.
+# (False will use standard dd). This speeds up resizes down
+# considerably since large runs of zeros won't have to be rsynced.
+# (boolean value)
+#sparse_copy = true
+
+#
+# Maximum number of retries to unplug VBD.
+# If set to 0, the unplug will be tried once with no retries.
+# (integer value)
+# Minimum value: 0
+#num_vbd_unplug_retries = 10
+
+#
+# Name of network to use for booting iPXE ISOs.
+#
+# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
+# This feature gives a means to roll your own image.
+#
+# By default this option is not set. Enable this option to
+# boot an iPXE ISO.
+#
+# Related Options:
+#
+# * `ipxe_boot_menu_url`
+# * `ipxe_mkisofs_cmd`
+# (string value)
+#ipxe_network_name = <None>
+
+#
+# URL to the iPXE boot menu.
+#
+# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
+# This feature gives a means to roll your own image.
+#
+# By default this option is not set. Enable this option to
+# boot an iPXE ISO.
+#
+# Related Options:
+#
+# * `ipxe_network_name`
+# * `ipxe_mkisofs_cmd`
+# (string value)
+#ipxe_boot_menu_url = <None>
+
+#
+# Name and optionally path of the tool used for ISO image creation.
+#
+# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
+# This feature gives a means to roll your own image.
+#
+# Note: By default `mkisofs` is not present in the Dom0, so you can
+# either add the package manually to Dom0 or include the `mkisofs`
+# binary in the image itself.
+#
+# Related Options:
+#
+# * `ipxe_network_name`
+# * `ipxe_boot_menu_url`
+# (string value)
+#ipxe_mkisofs_cmd = mkisofs
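+
+# Example (illustrative values) combining the three iPXE options above:
+#
+#   ipxe_network_name = ipxe-net
+#   ipxe_boot_menu_url = http://10.0.0.5/ipxe/boot.ipxe
+#   ipxe_mkisofs_cmd = mkisofs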
+
+#
+# URL for connection to XenServer/Xen Cloud Platform. A special value
+# of unix://local can be used to connect to the local unix socket.
+#
+# Possible values:
+#
+# * Any string that represents a URL. The connection_url is
+# generally the management network IP address of the XenServer.
+# * This option must be set if you choose the XenServer driver.
+# (string value)
+#connection_url = <None>
+
+# Username for connection to XenServer/Xen Cloud Platform (string
+# value)
+#connection_username = root
+
+# Password for connection to XenServer/Xen Cloud Platform (string
+# value)
+#connection_password = <None>
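+
+# Example (illustrative values): the minimal connection settings when
+# compute_driver=xenapi.XenAPIDriver is in use:
+#
+#   connection_url = https://10.0.0.20
+#   connection_username = root
+#   connection_password = secret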
+
+#
+# The interval used for polling of coalescing vhds.
+#
+# This is the interval after which the task of coalescing the VHD is
+# performed, until it reaches the max attempts that is set by
+# vhd_coalesce_max_attempts.
+#
+# Related options:
+#
+# * `vhd_coalesce_max_attempts`
+# (floating point value)
+# Minimum value: 0
+#vhd_coalesce_poll_interval = 5.0
+
+#
+# Ensure compute service is running on host XenAPI connects to.
+# This option must be set to false if the 'independent_compute'
+# option is set to true.
+#
+# Possible values:
+#
+# * Setting this option to true will make sure that compute service
+# is running on the same host that is specified by connection_url.
+# * Setting this option to false skips the check.
+#
+# Related options:
+#
+# * `independent_compute`
+# (boolean value)
+#check_host = true
+
+#
+# Max number of times to poll for VHD to coalesce.
+#
+# This option determines the maximum number of attempts that can be
+# made for coalescing the VHD before giving up.
+#
+# Related options:
+#
+# * `vhd_coalesce_poll_interval`
+# (integer value)
+# Minimum value: 0
+#vhd_coalesce_max_attempts = 20
+
+# Base path to the storage repository on the XenServer host. (string
+# value)
+#sr_base_path = /var/run/sr-mount
+
+#
+# The iSCSI Target Host.
+#
+# This option represents the hostname or ip of the iSCSI Target.
+# If the target host is not present in the connection information from
+# the volume provider then the value from this option is taken.
+#
+# Possible values:
+#
+# * Any string that represents hostname/ip of Target.
+# (unknown value)
+#target_host = <None>
+
+#
+# The iSCSI Target Port.
+#
+# This option represents the port of the iSCSI Target. If the
+# target port is not present in the connection information from the
+# volume provider then the value from this option is taken.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#target_port = 3260
+
+#
+# Used to prevent attempts to attach VBDs locally, so Nova can
+# be run in a VM on a different host.
+#
+# Related options:
+#
+# * ``CONF.flat_injected`` (Must be False)
+# * ``CONF.xenserver.check_host`` (Must be False)
+# * ``CONF.default_ephemeral_format`` (Must be unset or 'ext3')
+# * Joining host aggregates (will error if attempted)
+# * Swap disks for Windows VMs (will error if attempted)
+# * Nova-based auto_configure_disk (will error if attempted)
+# (boolean value)
+#independent_compute = false
+
+#
+# Wait time for instances to go to running state.
+#
+# Provide an integer value representing time in seconds to set the
+# wait time for an instance to go to running state.
+#
+# When a request to create an instance is received by nova-api and
+# communicated to nova-compute, the creation of the instance occurs
+# through interaction with Xen via XenAPI in the compute node. Once
+# the node on which the instance(s) are to be launched is decided by
+# nova-scheduler and the launch is triggered, a certain amount of wait
+# time is involved until the instance(s) can become available and
+# 'running'. This wait time is defined by running_timeout. If the
+# instances do not go to running state within this specified wait
+# time, the launch expires and the instance(s) are set to 'error'
+# state.
+# (integer value)
+# Minimum value: 0
+#running_timeout = 60
+
+# DEPRECATED:
+# The XenAPI VIF driver using XenServer Network APIs.
+#
+# Provide a string value representing the VIF XenAPI vif driver to use
+# for
+# plugging virtual network interfaces.
+#
+# Xen configuration uses bridging within the backend domain to allow
+# all VMs to appear on the network as individual hosts. Bridge
+# interfaces are used to create a XenServer VLAN network in which
+# the VIFs for the VM instances are plugged. If no VIF bridge driver
+# is plugged, the bridge is not made available. This configuration
+# option takes in a value for the VIF driver.
+#
+# Possible values:
+#
+# * nova.virt.xenapi.vif.XenAPIOpenVswitchDriver (default)
+# * nova.virt.xenapi.vif.XenAPIBridgeDriver (deprecated)
+#
+# Related options:
+#
+# * ``vlan_interface``
+# * ``ovs_integration_bridge``
+# (string value)
+# This option is deprecated for removal since 15.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# There are only two in-tree vif drivers for XenServer.
+# XenAPIBridgeDriver is for
+# nova-network which is deprecated and XenAPIOpenVswitchDriver is for
+# Neutron
+# which is the default configuration for Nova since the 15.0.0 Ocata
+# release. In
+# the future the "use_neutron" configuration option will be used to
+# determine
+# which vif driver to use.
+#vif_driver = nova.virt.xenapi.vif.XenAPIOpenVswitchDriver
+
+#
+# Dom0 plugin driver used to handle image uploads.
+#
+# Provide a string value representing a plugin driver required to
+# handle the image uploading to GlanceStore.
+#
+# Images and snapshots from XenServer need to be uploaded to the data
+# store for use. image_upload_handler takes in a value for the Dom0
+# plugin driver. This driver is then called to upload images to the
+# GlanceStore.
+# (string value)
+#image_upload_handler = nova.virt.xenapi.image.glance.GlanceStore
+
+#
+# Number of seconds to wait for SR to settle if the VDI
+# does not exist when first introduced.
+#
+# Some SRs, particularly iSCSI connections, are slow to see the VDIs
+# right after they are introduced. Setting this option to a
+# time interval will make the SR wait for that time period
+# before raising a 'VDI not found' exception.
+# (integer value)
+# Minimum value: 0
+#introduce_vdi_retry_wait = 20
+
+#
+# The name of the integration bridge that is used with xenapi
+# when connecting with Open vSwitch.
+#
+# Note: The value of this config option is dependent on the
+# environment, therefore this configuration value must be set
+# accordingly if you are using XenAPI.
+#
+# Possible values:
+#
+# * Any string that represents a bridge name.
+# (string value)
+#ovs_integration_bridge = <None>
+
+#
+# When adding a new host to a pool, this will append a --force flag to
+# the command, forcing hosts to join the pool even if they have
+# different CPUs.
+#
+# Since XenServer version 5.6 it is possible to create a pool of hosts
+# that have different CPU capabilities. To accommodate CPU differences,
+# XenServer limited the features it uses to determine CPU compatibility
+# to only the ones exposed by the CPU, and added support for CPU
+# masking. Despite this effort to level differences between CPUs, it is
+# still possible that adding a new host will fail, thus the option to
+# force join was introduced.
+# (boolean value)
+#use_join_force = true
+
+#
+# Publicly visible name for this console host.
+#
+# Possible values:
+#
+# * Current hostname (default) or any string representing hostname.
+# (string value)
+#console_public_hostname = <current_hostname>
+
+
+[xvp]
+#
+# Configuration options for XVP.
+#
+# xvp (Xen VNC Proxy) is a proxy server providing password-protected
+# VNC-based
+# access to the consoles of virtual machines hosted on Citrix
+# XenServer.
+
+#
+# From nova.conf
+#
+
+# XVP conf template (string value)
+#console_xvp_conf_template = $pybasedir/nova/console/xvp.conf.template
+
+# Generated XVP conf file (string value)
+#console_xvp_conf = /etc/xvp.conf
+
+# XVP master process pid file (string value)
+#console_xvp_pid = /var/run/xvp.pid
+
+# XVP log file (string value)
+#console_xvp_log = /var/log/xvp.log
+
+# Port for XVP to multiplex VNC connections on (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#console_xvp_multiplex_port = 5900
+
+[matchmaker_redis]
+{#- include "oslo_templates/oslo/_matchmaker_redis.conf" #}
+
+[oslo_messaging_notifications]
+{%- set _data = controller.notification %}
+{%- include "oslo_templates/files/queens/oslo/messaging/_notifications.conf" %}
+
+{%- if controller.message_queue is defined %}
+{%- set _data = controller.message_queue %}
+{%- if _data.engine == 'rabbitmq' %}
+ {%- set messaging_engine = 'rabbit' %}
+{%- else %}
+ {%- set messaging_engine = _data.engine %}
+{%- endif %}
+[oslo_messaging_{{ messaging_engine }}]
+{%- include "oslo_templates/files/queens/oslo/messaging/_" + messaging_engine + ".conf" %}
+{%- endif %}
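+# As an illustration, a pillar sketch like the following (hypothetical
+# values) selects the 'rabbit' engine and renders an
+# [oslo_messaging_rabbit] section from the Queens oslo template:
+#
+#   nova:
+#     controller:
+#       message_queue:
+#         engine: rabbitmq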
+
+[oslo_policy]
+{%- if controller.policy is defined %}
+{%- set _data = controller.policy %}
+{%- include "oslo_templates/files/queens/oslo/_policy.conf" %}
+{%- endif %}
+
+[database]
+{%- set _data = controller.database %}
+{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': controller.cacert_file}) %}{% endif %}
+{%- include "oslo_templates/files/queens/oslo/_database.conf" %}
+
+[oslo_middleware]
+{%- set _data = controller %}
+{%- include "oslo_templates/files/queens/oslo/_middleware.conf" %}
+
+[keystone_authtoken]
+{%- set _data = controller.identity %}
+{%- set auth_type = _data.get('auth_type', 'password') %}
+{%- include "oslo_templates/files/queens/keystonemiddleware/_auth_token.conf" %}
+{%- include "oslo_templates/files/queens/keystoneauth/_type_" + auth_type + ".conf" %}
+
diff --git a/nova/files/queens/nova-controller.conf.RedHat b/nova/files/queens/nova-controller.conf.RedHat
new file mode 120000
index 0000000..09c7524
--- /dev/null
+++ b/nova/files/queens/nova-controller.conf.RedHat
@@ -0,0 +1 @@
+nova-controller.conf.Debian
\ No newline at end of file
diff --git a/nova/files/queens/nova-placement-api.conf b/nova/files/queens/nova-placement-api.conf
new file mode 100644
index 0000000..e7cde11
--- /dev/null
+++ b/nova/files/queens/nova-placement-api.conf
@@ -0,0 +1,28 @@
+{%- from "nova/map.jinja" import controller with context %}
+Listen {{ controller.bind.private_address }}:8778
+
+<VirtualHost {{ controller.bind.private_address }}:8778>
+ WSGIScriptAlias / /usr/bin/nova-placement-api
+ WSGIDaemonProcess nova-placement processes=5 threads=1 user=nova group=nova display-name=%{GROUP}
+ WSGIProcessGroup nova-placement
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+ LimitRequestBody 114688
+
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+
+ ErrorLog /var/log/apache2/nova_placement_error.log
+ CustomLog /var/log/apache2/nova_placement_access.log "%v:%p %h %l %u %t \"%r\" %>s %D %O \"%{Referer}i\" \"%{User-Agent}i\""
+
+ <Directory /usr/bin>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
+</VirtualHost>
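+
+# A minimal pillar sketch (hypothetical address) for the bind values
+# rendered above:
+#
+#   nova:
+#     controller:
+#       bind:
+#         private_address: 10.0.0.10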
diff --git a/nova/files/queens/qemu.conf.Debian b/nova/files/queens/qemu.conf.Debian
new file mode 100644
index 0000000..cb20491
--- /dev/null
+++ b/nova/files/queens/qemu.conf.Debian
@@ -0,0 +1,517 @@
+{%- from "nova/map.jinja" import compute with context %}
+# Master configuration file for the QEMU driver.
+# All settings described here are optional - if omitted, sensible
+# defaults are used.
+
+# VNC is configured to listen on 127.0.0.1 by default.
+# To make it listen on all public interfaces, uncomment
+# this next option.
+#
+# NB, strong recommendation to enable TLS + x509 certificate
+# verification when allowing public access
+#
+#vnc_listen = "0.0.0.0"
+
+# Enable this option to have VNC served over an automatically created
+# unix socket. This prevents unprivileged access from users on the
+# host machine, though most VNC clients do not support it.
+#
+# This will only be enabled for VNC configurations that do not have
+# a hardcoded 'listen' or 'socket' value. This setting takes preference
+# over vnc_listen.
+#
+#vnc_auto_unix_socket = 1
+
+# Enable use of TLS encryption on the VNC server. This requires
+# a VNC client which supports the VeNCrypt protocol extension.
+# Examples include vinagre, virt-viewer, virt-manager and vencrypt
+# itself. UltraVNC, RealVNC, TightVNC do not support this
+#
+# It is necessary to setup CA and issue a server certificate
+# before enabling this.
+#
+#vnc_tls = 1
+
+
+# Use of TLS requires that x509 certificates be issued. The
+# default is to keep them in /etc/pki/libvirt-vnc. This directory
+# must contain
+#
+# ca-cert.pem - the CA master certificate
+# server-cert.pem - the server certificate signed with ca-cert.pem
+# server-key.pem - the server private key
+#
+# This option allows the certificate directory to be changed
+#
+#vnc_tls_x509_cert_dir = "/etc/pki/libvirt-vnc"
+
+
+# The default TLS configuration only uses certificates for the server
+# allowing the client to verify the server's identity and establish
+# an encrypted channel.
+#
+# It is possible to use x509 certificates for authentication too, by
+# issuing a x509 certificate to every client who needs to connect.
+#
+# Enabling this option will reject any client who does not have a
+# certificate signed by the CA in /etc/pki/libvirt-vnc/ca-cert.pem
+#
+#vnc_tls_x509_verify = 1
+
+
+# The default VNC password. Only 8 bytes are significant for
+# VNC passwords. This parameter is only used if the per-domain
+# XML config does not already provide a password. To allow
+# access without passwords, leave this commented out. An empty
+# string will still enable passwords, but be rejected by QEMU,
+# effectively preventing any use of VNC. Obviously change this
+# example here before you set this.
+#
+#vnc_password = "XYZ12345"
+
+
+# Enable use of SASL encryption on the VNC server. This requires
+# a VNC client which supports the SASL protocol extension.
+# Examples include vinagre, virt-viewer and virt-manager
+# itself. UltraVNC, RealVNC, TightVNC do not support this
+#
+# It is necessary to configure /etc/sasl2/qemu.conf to choose
+# the desired SASL plugin (e.g., GSSAPI for Kerberos)
+#
+#vnc_sasl = 1
+
+
+# The default SASL configuration file is located in /etc/sasl2/
+# When running libvirtd unprivileged, it may be desirable to
+# override the configs in this location. Set this parameter to
+# point to the directory, and create a qemu.conf in that location
+#
+#vnc_sasl_dir = "/some/directory/sasl2"
+
+
+# QEMU implements an extension for providing audio over a VNC connection,
+# though if your VNC client does not support it, your only chance for getting
+# sound output is through regular audio backends. By default, libvirt will
+# disable all QEMU sound backends if using VNC, since they can cause
+# permissions issues. Enabling this option will make libvirtd honor the
+# QEMU_AUDIO_DRV environment variable when using VNC.
+#
+#vnc_allow_host_audio = 0
+
+
+
+# SPICE is configured to listen on 127.0.0.1 by default.
+# To make it listen on all public interfaces, uncomment
+# this next option.
+#
+# NB, strong recommendation to enable TLS + x509 certificate
+# verification when allowing public access
+#
+#spice_listen = "0.0.0.0"
+
+
+# Enable use of TLS encryption on the SPICE server.
+#
+# It is necessary to setup CA and issue a server certificate
+# before enabling this.
+#
+#spice_tls = 1
+
+
+# Use of TLS requires that x509 certificates be issued. The
+# default is to keep them in /etc/pki/libvirt-spice. This directory
+# must contain
+#
+# ca-cert.pem - the CA master certificate
+# server-cert.pem - the server certificate signed with ca-cert.pem
+# server-key.pem - the server private key
+#
+# This option allows the certificate directory to be changed.
+#
+#spice_tls_x509_cert_dir = "/etc/pki/libvirt-spice"
+
+
+# The default SPICE password. This parameter is only used if the
+# per-domain XML config does not already provide a password. To
+# allow access without passwords, leave this commented out. An
+# empty string will still enable passwords, but be rejected by
+# QEMU, effectively preventing any use of SPICE. Obviously change
+# this example here before you set this.
+#
+#spice_password = "XYZ12345"
+
+
+# Enable use of SASL encryption on the SPICE server. This requires
+# a SPICE client which supports the SASL protocol extension.
+#
+# It is necessary to configure /etc/sasl2/qemu.conf to choose
+# the desired SASL plugin (e.g., GSSAPI for Kerberos)
+#
+#spice_sasl = 1
+
+# The default SASL configuration file is located in /etc/sasl2/
+# When running libvirtd unprivileged, it may be desirable to
+# override the configs in this location. Set this parameter to
+# point to the directory, and create a qemu.conf in that location
+#
+#spice_sasl_dir = "/some/directory/sasl2"
+
+
+# By default, if no graphical front end is configured, libvirt will disable
+# QEMU audio output since directly talking to alsa/pulseaudio may not work
+# with various security settings. If you know what you're doing, enable
+# the setting below and libvirt will pass through the QEMU_AUDIO_DRV
+# environment variable when using nographics.
+#
+#nographics_allow_host_audio = 1
+
+
+# Override the port for creating both VNC and SPICE sessions (min).
+# This defaults to 5900 and increases for consecutive sessions
+# or when ports are occupied, until it hits the maximum.
+#
+# Minimum must be greater than or equal to 5900, as a lower number
+# would result in a negative VNC display number.
+#
+# Maximum must be less than 65536, because higher numbers do not make
+# sense as a port number.
+#
+#remote_display_port_min = 5900
+#remote_display_port_max = 65535
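+#
+# For example, with the defaults above the first session is allocated
+# port 5900 (VNC display :0), the next 5901 (display :1), and so on
+# up to remote_display_port_max.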
+
+# VNC WebSocket port policies, same rules apply as with remote display
+# ports. VNC WebSockets use similar display <-> port mappings, with
+# the exception being that ports start from 5700 instead of 5900.
+#
+#remote_websocket_port_min = 5700
+#remote_websocket_port_max = 65535
+
+# The default security driver is SELinux. If SELinux is disabled
+# on the host, then the security driver will automatically disable
+# itself. If you wish to disable QEMU SELinux security driver while
+# leaving SELinux enabled for the host in general, then set this
+# to 'none' instead. It's also possible to use more than one security
+# driver at the same time; for this, use a comma-separated list of
+# names delimited by square brackets. For example:
+#
+# security_driver = [ "selinux", "apparmor" ]
+#
+# Notes: The DAC security driver is always enabled; as a result, the
+# value of security_driver cannot contain "dac". The value "none" is
+# a special value; security_driver can be set to that value in
+# isolation, but it cannot appear in a list of drivers.
+#
+#security_driver = "selinux"
+
+# If set to non-zero, then the default security labeling
+# will make guests confined. If set to zero, then guests
+# will be unconfined by default. Defaults to 1.
+#security_default_confined = 1
+
+# If set to non-zero, then attempts to create unconfined
+# guests will be blocked. Defaults to 0.
+#security_require_confined = 1
+
+# The user for QEMU processes run by the system instance. It can be
+# specified as a user name or as a user id. The qemu driver will try to
+# parse this value first as a name and then, if the name doesn't exist,
+# as a user id.
+#
+# Since a sequence of digits is a valid user name, a leading plus sign
+# can be used to ensure that a user id will not be interpreted as a user
+# name.
+#
+# Some examples of valid values are:
+#
+# user = "qemu" # A user named "qemu"
+# user = "+0" # Super user (uid=0)
+# user = "100" # A user named "100" or a user with uid=100
+#
+#user = "root"
+
+# The group for QEMU processes run by the system instance. It can be
+# specified in a similar way to user.
+#group = "root"
+
+# Whether libvirt should dynamically change file ownership
+# to match the configured user/group above. Defaults to 1.
+# Set to 0 to disable file ownership changes.
+#dynamic_ownership = 1
+
+{%- if compute.qemu is defined %}
+{%- if compute.qemu.user is defined %}
+user = "{{ compute.qemu.user }}"
+{%- endif %}
+
+{%- if compute.qemu.group is defined %}
+group = "{{ compute.qemu.group }}"
+{%- endif %}
+
+{%- if compute.qemu.dynamic_ownership is defined %}
+dynamic_ownership = "{{ compute.qemu.dynamic_ownership }}"
+{%- endif %}
+{%- endif %}
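+# A pillar sketch (hypothetical values) for the user/group/ownership
+# overrides above:
+#
+#   nova:
+#     compute:
+#       qemu:
+#         user: nova
+#         group: kvm
+#         dynamic_ownership: 1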
+
+# What cgroup controllers to make use of with QEMU guests
+#
+# - 'cpu' - use for scheduler tunables
+# - 'devices' - use for device whitelisting
+# - 'memory' - use for memory tunables
+# - 'blkio' - use for block devices I/O tunables
+# - 'cpuset' - use for CPUs and memory nodes
+# - 'cpuacct' - use for CPUs statistics.
+#
+# NB, even if configured here, they won't be used unless
+# the administrator has mounted cgroups, e.g.:
+#
+# mkdir /dev/cgroup
+# mount -t cgroup -o devices,cpu,memory,blkio,cpuset none /dev/cgroup
+#
+# They can be mounted anywhere, and different controllers
+# can be mounted in different locations. libvirt will detect
+# where they are located.
+#
+#cgroup_controllers = [ "cpu", "devices", "memory", "blkio", "cpuset", "cpuacct" ]
+
+# This is the basic set of devices allowed / required by
+# all virtual machines.
+#
+# As well as this, any configured block-backed disks,
+# all sound devices, and all PTY devices are allowed.
+#
+# This will only need setting if newer QEMU suddenly
+# wants some device we don't already know about.
+#
+#cgroup_device_acl = [
+# "/dev/null", "/dev/full", "/dev/zero",
+# "/dev/random", "/dev/urandom",
+# "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+# "/dev/rtc","/dev/hpet", "/dev/vfio/vfio"
+#]
+
+
+# The default format for Qemu/KVM guest save images is raw; that is, the
+# memory from the domain is dumped out directly to a file. If you have
+# guests with a large amount of memory, however, this can take up quite
+# a bit of space. If you would like to compress the images while they
+# are being saved to disk, you can also set "lzop", "gzip", "bzip2", or "xz"
+# for save_image_format. Note that this means you slow down the process of
+# saving a domain in order to save disk space; the list above is in descending
+# order by performance and ascending order by compression ratio.
+#
+# save_image_format is used when you use 'virsh save' or 'virsh managedsave'
+# at scheduled saving, and it is an error if the specified save_image_format
+# is not valid, or the requested compression program can't be found.
+#
+# dump_image_format is used when you use 'virsh dump' at emergency
+# crashdump, and if the specified dump_image_format is not valid, or
+# the requested compression program can't be found, this falls
+# back to "raw" compression.
+#
+# snapshot_image_format specifies the compression algorithm of the memory save
+# image when an external snapshot of a domain is taken. This does not apply
+# on disk image format. It is an error if the specified format isn't valid,
+# or the requested compression program can't be found.
+#
+#save_image_format = "raw"
+#dump_image_format = "raw"
+#snapshot_image_format = "raw"
+
+# When a domain is configured to be auto-dumped when libvirtd receives a
+# watchdog event from qemu guest, libvirtd will save dump files in directory
+# specified by auto_dump_path. Default value is /var/lib/libvirt/qemu/dump
+#
+#auto_dump_path = "/var/lib/libvirt/qemu/dump"
+
+# When a domain is configured to be auto-dumped, enabling this flag
+# has the same effect as using the VIR_DUMP_BYPASS_CACHE flag with the
+# virDomainCoreDump API. That is, the system will avoid using the
+# file system cache while writing the dump file, but may cause
+# slower operation.
+#
+#auto_dump_bypass_cache = 0
+
+# When a domain is configured to be auto-started, enabling this flag
+# has the same effect as using the VIR_DOMAIN_START_BYPASS_CACHE flag
+# with the virDomainCreateWithFlags API. That is, the system will
+# avoid using the file system cache when restoring any managed state
+# file, but may cause slower operation.
+#
+#auto_start_bypass_cache = 0
+
+# If provided by the host and a hugetlbfs mount point is configured,
+# a guest may request huge page backing. When this mount point is
+# unspecified here, determination of a host mount point in /proc/mounts
+# will be attempted. Specifying an explicit mount overrides detection
+# of the same in /proc/mounts. Setting the mount point to "" will
+# disable guest hugepage backing.
+#
+# NB, within this mount point, guests will create memory backing files
+# in a location of $MOUNTPOINT/libvirt/qemu
+#
+#hugetlbfs_mount = "/dev/hugepages"
+#hugetlbfs_mount = ["/run/hugepages/kvm", "/mnt/hugepages_1GB"]
+{%- if compute.hugepages is defined %}
+hugetlbfs_mount = [{%- for mount in compute.hugepages.mount_points %}"{{ mount.path }}"{% if not loop.last %}, {% endif %}{%- endfor %}]
+security_driver="none"
+{%- endif %}
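+# A pillar sketch (hypothetical paths) that renders the hugetlbfs_mount
+# override above:
+#
+#   nova:
+#     compute:
+#       hugepages:
+#         mount_points:
+#           - path: /mnt/hugepages_2MB
+#           - path: /mnt/hugepages_1GB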
+
+# Path to the setuid helper for creating tap devices. This executable
+# is used to create <source type='bridge'> interfaces when libvirtd is
+# running unprivileged. libvirt invokes the helper directly, instead
+# of using "-netdev bridge", for security reasons.
+#bridge_helper = "/usr/libexec/qemu-bridge-helper"
+
+
+
+# If clear_emulator_capabilities is enabled, libvirt will drop all
+# privileged capabilities of the QEMU/KVM emulator. This is enabled by
+# default.
+#
+# Warning: Disabling this option means that a compromised guest can
+# exploit the privileges and possibly do damage to the host.
+#
+#clear_emulator_capabilities = 1
+
+
+# If enabled, libvirt will have QEMU set its process name to
+# "qemu:VM_NAME", where VM_NAME is the name of the VM. The QEMU
+# process will appear as "qemu:VM_NAME" in process listings and
+# other system monitoring tools. By default, QEMU does not set
+# its process title, so the complete QEMU command (emulator and
+# its arguments) appear in process listings.
+#
+#set_process_name = 1
+
+
+# If max_processes is set to a positive integer, libvirt will use
+# it to set the maximum number of processes that can be run by the
+# qemu user. This can be used to override the default value set by
+# the host OS. The same applies to max_files, which sets the limit
+# on the maximum number of opened files.
+#
+{%- if compute.qemu is defined %}
+
+{%- if compute.qemu.max_processes is defined %}
+max_processes = {{ compute.qemu.max_processes }}
+{%- endif %}
+
+{%- if compute.qemu.max_files is defined %}
+max_files = {{ compute.qemu.max_files }}
+{%- endif %}
+
+{%- endif %}
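+# A pillar sketch (hypothetical limits) for the max_processes and
+# max_files overrides above:
+#
+#   nova:
+#     compute:
+#       qemu:
+#         max_processes: 4096
+#         max_files: 32768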
+
+# mac_filter enables MAC address based filtering on bridge ports.
+# This currently requires ebtables to be installed.
+#
+#mac_filter = 1
+
+
+# By default, PCI devices below a non-ACS switch are not allowed to be
+# assigned to guests. By setting relaxed_acs_check to 1 such devices
+# will be allowed to be assigned to guests.
+#
+#relaxed_acs_check = 1
+
+
+# If allow_disk_format_probing is enabled, libvirt will probe disk
+# images to attempt to identify their format, when not otherwise
+# specified in the XML. This is disabled by default.
+#
+# WARNING: Enabling probing is a security hole in almost all
+# deployments. It is strongly recommended that users update their
+# guest XML <disk> elements to include <driver type='XXXX'/>
+# elements instead of enabling this option.
+#
+#allow_disk_format_probing = 1
+
+
+# To enable 'Sanlock' project based locking of the file
+# content (to prevent two VMs writing to the same
+# disk), uncomment this
+#
+#lock_manager = "sanlock"
+
+
+
+# Set the limit on the maximum number of API calls queued on one
+# domain. All other API calls over this threshold will fail to
+# acquire the job lock. In particular, setting this to zero turns
+# the feature off. Note that the job lock is per domain.
+#
+#max_queued = 0
+
+###################################################################
+# Keepalive protocol:
+# This allows qemu driver to detect broken connections to remote
+# libvirtd during peer-to-peer migration. A keepalive message is
+# sent to the daemon after keepalive_interval seconds of inactivity
+# to check if the daemon is still responding; keepalive_count is a
+# maximum number of keepalive messages that are allowed to be sent
+# to the daemon without getting any response before the connection
+# is considered broken. In other words, the connection is
+# automatically closed approximately after
+# keepalive_interval * (keepalive_count + 1) seconds since the last
+# message received from the daemon. If keepalive_interval is set to
+# -1, qemu driver will not send keepalive requests during
+# peer-to-peer migration; however, the remote libvirtd can still
+# send them and source libvirtd will send responses. When
+# keepalive_count is set to 0, connections will be automatically
+# closed after keepalive_interval seconds of inactivity without
+# sending any keepalive messages.
+#
+#keepalive_interval = 5
+#keepalive_count = 5
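+#
+# For example, with the defaults above (interval 5, count 5), a broken
+# connection is closed approximately 5 * (5 + 1) = 30 seconds after
+# the last message received from the daemon.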
+
+
+
+# Use seccomp syscall whitelisting in QEMU.
+# 1 = on, 0 = off, -1 = use QEMU default
+# Defaults to -1.
+#
+#seccomp_sandbox = 1
+
+
+
+# Override the listen address for all incoming migrations. Defaults to
+# 0.0.0.0, or :: if both host and qemu are capable of IPv6.
+#migration_address = "127.0.0.1"
+
+
+# Override the port range used for incoming migrations.
+#
+# Minimum must be greater than 0, however when QEMU is not running as root,
+# setting the minimum to be lower than 1024 will not work.
+#
+# Maximum must not be greater than 65535.
+#
+#migration_port_min = 49152
+#migration_port_max = 49215
+cgroup_device_acl = [
+ "/dev/null", "/dev/full", "/dev/zero",
+ "/dev/random", "/dev/urandom",
+ "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+ "/dev/rtc", "/dev/hpet","/dev/net/tun",
+ {%- if compute.get('sriov', false) %}
+ "/dev/vfio/vfio",
+ {% endif %}
+]
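+# A pillar sketch (hypothetical) enabling the "/dev/vfio/vfio" entry
+# above for SR-IOV/VFIO passthrough:
+#
+#   nova:
+#     compute:
+#       sriov: true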
+
+# The backend to use for handling stdout/stderr output from
+# QEMU processes.
+#
+# 'file': QEMU writes directly to a plain file. This is the
+# historical default, but allows QEMU to inflict a
+# denial of service attack on the host by exhausting
+# filesystem space
+#
+# 'logd': QEMU writes to a pipe provided by virtlogd daemon.
+# This is the current default, providing protection
+# against denial of service by performing log file
+# rollover when a size limit is hit.
+#
+#stdio_handler = "logd"
+{%- if compute.get('qemu',{}).get('logging',{}).handler is defined %}
+stdio_handler = "{{ compute.qemu.logging.handler }}"
+{%- endif %}
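+# A pillar sketch (hypothetical value) overriding the handler above:
+#
+#   nova:
+#     compute:
+#       qemu:
+#         logging:
+#           handler: file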
diff --git a/nova/files/queens/qemu.conf.RedHat b/nova/files/queens/qemu.conf.RedHat
new file mode 120000
index 0000000..1d23f19
--- /dev/null
+++ b/nova/files/queens/qemu.conf.RedHat
@@ -0,0 +1 @@
+qemu.conf.Debian
\ No newline at end of file
diff --git a/nova/files/queens/virtlogd.conf.Debian b/nova/files/queens/virtlogd.conf.Debian
new file mode 100644
index 0000000..ecb5b2e
--- /dev/null
+++ b/nova/files/queens/virtlogd.conf.Debian
@@ -0,0 +1,78 @@
+{%- from "nova/map.jinja" import compute with context %}
+# Master virtlogd daemon configuration file
+#
+
+#################################################################
+#
+# Logging controls
+#
+
+# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
+# basically, 1 will log everything possible.
+{%- if compute.get('virtlog',{}).level is defined %}
+log_level = {{ compute.virtlog.level }}
+{%- endif %}
+# Logging filters:
+# A filter allows selecting a different logging level for a given
+# category of logs.
+# The format for a filter is one of:
+# x:name
+# x:+name
+# where name is a string which is matched against source file name,
+# e.g., "remote", "qemu", or "util/json", the optional "+" prefix
+# tells libvirt to log stack trace for each message matching name,
+# and x is the minimal level where matching messages should be logged:
+# 1: DEBUG
+# 2: INFO
+# 3: WARNING
+# 4: ERROR
+#
+# Multiple filters can be defined in a single @filters; they just need
+# to be separated by spaces.
+#
+# e.g. to only get warning or errors from the remote layer and only errors
+# from the event layer:
+#log_filters="3:remote 4:event"
+{%- if compute.get('virtlog',{}).filters is defined %}
+log_filters={{ compute.virtlog.get('filters')|yaml_dquote }}
+{%- endif %}
+# Logging outputs:
+# An output is one of the places to save logging information
+# The format for an output can be:
+# x:stderr
+# output goes to stderr
+# x:syslog:name
+# use syslog for the output and use the given name as the ident
+# x:file:file_path
+# output to a file, with the given filepath
+# x:journald
+# output to the systemd journal
+# In all cases the x prefix is the minimal level, acting as a filter
+# 1: DEBUG
+# 2: INFO
+# 3: WARNING
+# 4: ERROR
+#
+# Multiple outputs can be defined; they just need to be separated by
+# spaces.
+# e.g. to log all warnings and errors to syslog under the virtlogd ident:
+#log_outputs="3:syslog:virtlogd"
+{%- if compute.get('virtlog',{}).outputs is defined %}
+log_outputs={{ compute.virtlog.get('outputs')|yaml_dquote }}
+{%- endif %}
+# The maximum number of concurrent client connections to allow
+# over all sockets combined.
+#max_clients = 1024
+{%- if compute.get('virtlog',{}).max_clients is defined %}
+max_clients = {{ compute.virtlog.max_clients }}
+{%- endif %}
+# Maximum file size before rolling over. Defaults to 2 MB
+#max_size = 2097152
+{%- if compute.get('virtlog',{}).max_size is defined %}
+max_size = {{ compute.virtlog.max_size }}
+{%- endif %}
+# Maximum number of backup files to keep. Defaults to 3,
+# not including the primary active file
+#max_backups = 3
+{%- if compute.get('virtlog',{}).max_backups is defined %}
+max_backups = {{ compute.virtlog.max_backups }}
+{%- endif %}
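+# A pillar sketch (hypothetical values) covering the virtlog overrides
+# above:
+#
+#   nova:
+#     compute:
+#       virtlog:
+#         level: 3
+#         filters: '3:remote 4:event'
+#         outputs: '3:syslog:virtlogd'
+#         max_clients: 512
+#         max_size: 2097152
+#         max_backups: 3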
diff --git a/nova/files/queens/virtlogd.conf.RedHat b/nova/files/queens/virtlogd.conf.RedHat
new file mode 120000
index 0000000..d466306
--- /dev/null
+++ b/nova/files/queens/virtlogd.conf.RedHat
@@ -0,0 +1 @@
+virtlogd.conf.Debian
\ No newline at end of file
diff --git a/nova/map.jinja b/nova/map.jinja
index 4d06861..e9282b7 100644
--- a/nova/map.jinja
+++ b/nova/map.jinja
@@ -136,6 +136,9 @@
region: RegionOne
network: {{ compute_network }}
heal_instance_info_cache_interval: '60'
+ message_queue:
+ zmq_linger: 30
+ rpc_response_timeout: 3600
logging:
log_appender: false
log_handlers:
@@ -166,6 +169,9 @@
region: RegionOne
network: {{ compute_network }}
heal_instance_info_cache_interval: '60'
+ message_queue:
+ zmq_linger: 30
+ rpc_response_timeout: 3600
logging:
log_appender: false
log_handlers: