blob: 9f6bd00ed6438b0ebf847578e9a22139905a84ce [file] [log] [blame]
1{%- from "nova/map.jinja" import compute,compute_driver_mapping with context %}
2[DEFAULT]
3
4#
5# From nova.conf
6#
7
8#
9# Availability zone for internal services.
10#
11# This option determines the availability zone for the various internal nova
12# services, such as 'nova-scheduler', 'nova-conductor', etc.
13#
14# Possible values:
15#
16# * Any string representing an existing availability zone name.
17# (string value)
18#internal_service_availability_zone = internal
19
20#
21# Default availability zone for compute services.
22#
23# This option determines the default availability zone for 'nova-compute'
24# services, which will be used if the service(s) do not belong to aggregates
25# with
26# availability zone metadata.
27#
28# Possible values:
29#
30# * Any string representing an existing availability zone name.
31# (string value)
32#default_availability_zone = nova
33
34#
35# Default availability zone for instances.
36#
37# This option determines the default availability zone for instances, which will
38# be used when a user does not specify one when creating an instance. The
39# instance(s) will be bound to this availability zone for their lifetime.
40#
41# Possible values:
42#
43# * Any string representing an existing availability zone name.
44# * None, which means that the instance can move from one availability zone to
45# another during its lifetime if it is moved from one compute node to another.
46# (string value)
47#default_schedule_zone = <None>
48
49# Length of generated instance admin passwords. (integer value)
50# Minimum value: 0
51#password_length = 12
52
53#
54# Time period to generate instance usages for. It is possible to define optional
55# offset to given period by appending @ character followed by a number defining
56# offset.
57#
58# Possible values:
59#
60# * period, example: ``hour``, ``day``, ``month`` or ``year``
61# * period with offset, example: ``month@15`` will result in monthly audits
62# starting on 15th day of month.
63# (string value)
64instance_usage_audit_period = {{ compute.instance_usage_audit_period|default('hour') }}
65
66#
67# Start and use a daemon that can run the commands that need to be run with
68# root privileges. This option is usually enabled on nodes that run nova compute
69# processes.
70# (boolean value)
71#use_rootwrap_daemon = false
72
73#
74# Path to the rootwrap configuration file.
75#
76# Goal of the root wrapper is to allow a service-specific unprivileged user to
77# run a number of actions as the root user in the safest manner possible.
78# The configuration file used here must match the one defined in the sudoers
79# entry.
80# (string value)
81#rootwrap_config = /etc/nova/rootwrap.conf
82
83# Explicitly specify the temporary working directory. (string value)
84#tempdir = <None>
85
86#
87# Defines which driver to use for controlling virtualization.
88#
89# Possible values:
90#
91# * ``libvirt.LibvirtDriver``
92# * ``xenapi.XenAPIDriver``
93# * ``fake.FakeDriver``
94# * ``ironic.IronicDriver``
95# * ``vmwareapi.VMwareVCDriver``
96# * ``hyperv.HyperVDriver``
97# * ``powervm.PowerVMDriver``
98# * ``zvm.ZVMDriver``
99# (string value)
100compute_driver = {{ compute.get('compute_driver', 'libvirt.LibvirtDriver') }}
101
102#
103# Allow destination machine to match source for resize. Useful when
104# testing in single-host environments. By default it is not allowed
105# to resize to the same host. Setting this option to true will add
106# the same host to the destination options. Also set to true
107# if you allow the ServerGroupAffinityFilter and need to resize.
108# (boolean value)
109allow_resize_to_same_host = true
110
111#
112# Image properties that should not be inherited from the instance
113# when taking a snapshot.
114#
115# This option gives an opportunity to select which image-properties
116# should not be inherited by newly created snapshots.
117#
118# Possible values:
119#
120# * A comma-separated list whose item is an image property. Usually only
121# the image properties that are only needed by base images can be included
122# here, since the snapshots that are created from the base images don't
123# need them.
124# * Default list: cache_in_nova, bittorrent, img_signature_hash_method,
125# img_signature, img_signature_key_type,
126# img_signature_certificate_uuid
127#
128# (list value)
129#non_inheritable_image_properties = cache_in_nova,bittorrent,img_signature_hash_method,img_signature,img_signature_key_type,img_signature_certificate_uuid
130
131#
132# Maximum number of devices that will result in a local image being
133# created on the hypervisor node.
134#
135# A negative number means unlimited. Setting max_local_block_devices
136# to 0 means that any request that attempts to create a local disk
137# will fail. This option is meant to limit the number of local discs
138# (so root local disc that is the result of --image being used, and
139# any other ephemeral and swap disks). 0 does not mean that images
140# will be automatically converted to volumes and boot instances from
141# volumes - it just means that all requests that attempt to create a
142# local disk will fail.
143#
144# Possible values:
145#
146# * 0: Creating a local disk is not allowed.
147# * Negative number: Allows unlimited number of local discs.
148# * Positive number: Allows only these many number of local discs.
149# (Default value is 3).
150# (integer value)
151#max_local_block_devices = 3
152
153#
154# A comma-separated list of monitors that can be used for getting
155# compute metrics. You can use the alias/name from the setuptools
156# entry points for nova.compute.monitors.* namespaces. If no
157# namespace is supplied, the "cpu." namespace is assumed for
158# backwards-compatibility.
159#
160# NOTE: Only one monitor per namespace (For example: cpu) can be loaded at
161# a time.
162#
163# Possible values:
164#
165# * An empty list will disable the feature (Default).
166# * An example value that would enable both the CPU and NUMA memory
167# bandwidth monitors that use the virt driver variant:
168#
169# compute_monitors = cpu.virt_driver, numa_mem_bw.virt_driver
170# (list value)
171#compute_monitors =
172
173#
174# The default format an ephemeral_volume will be formatted with on creation.
175#
176# Possible values:
177#
178# * ``ext2``
179# * ``ext3``
180# * ``ext4``
181# * ``xfs``
182# * ``ntfs`` (only for Windows guests)
183# (string value)
184#default_ephemeral_format = <None>
185
186#
187# Determine if instance should boot or fail on VIF plugging timeout.
188#
189# Nova sends a port update to Neutron after an instance has been scheduled,
190# providing Neutron with the necessary information to finish setup of the port.
191# Once completed, Neutron notifies Nova that it has finished setting up the
192# port, at which point Nova resumes the boot of the instance since network
193# connectivity is now supposed to be present. A timeout will occur if the reply
194# is not received after a given interval.
195#
196# This option determines what Nova does when the VIF plugging timeout event
197# happens. When enabled, the instance will error out. When disabled, the
198# instance will continue to boot on the assumption that the port is ready.
199#
200# Possible values:
201#
202# * True: Instances should fail after VIF plugging timeout
203# * False: Instances should continue booting after VIF plugging timeout
204# (boolean value)
205vif_plugging_is_fatal = {{ compute.get('vif_plugging_is_fatal', 'true') }}
206
207#
208# Timeout for Neutron VIF plugging event message arrival.
209#
210# Number of seconds to wait for Neutron vif plugging events to
211# arrive before continuing or failing (see 'vif_plugging_is_fatal').
212#
213# If you are hitting timeout failures at scale, consider running rootwrap
214# in "daemon mode" in the neutron agent via the ``[agent]/root_helper_daemon``
215# neutron configuration option.
216#
217# Related options:
218#
219# * vif_plugging_is_fatal - If ``vif_plugging_timeout`` is set to zero and
220# ``vif_plugging_is_fatal`` is False, events should not be expected to
221# arrive at all.
222# (integer value)
223# Minimum value: 0
224vif_plugging_timeout = {{ compute.get('vif_plugging_timeout', '300') }}
225
226# Path to '/etc/network/interfaces' template.
227#
228# The path to a template file for the '/etc/network/interfaces'-style file,
229# which
230# will be populated by nova and subsequently used by cloudinit. This provides a
231# method to configure network connectivity in environments without a DHCP
232# server.
233#
234# The template will be rendered using Jinja2 template engine, and receive a
235# top-level key called ``interfaces``. This key will contain a list of
236# dictionaries, one for each interface.
237#
238# Refer to the cloudinit documentation for more information:
239#
240# https://cloudinit.readthedocs.io/en/latest/topics/datasources.html
241#
242# Possible values:
243#
244# * A path to a Jinja2-formatted template for a Debian '/etc/network/interfaces'
245# file. This applies even if using a non Debian-derived guest.
246#
247# Related options:
248#
249# * ``flat_inject``: This must be set to ``True`` to ensure nova embeds network
250# configuration information in the metadata provided through the config drive.
251# (string value)
252#injected_network_template = $pybasedir/nova/virt/interfaces.template
253
254#
255# The image preallocation mode to use.
256#
257# Image preallocation allows storage for instance images to be allocated up
258# front
259# when the instance is initially provisioned. This ensures immediate feedback is
260# given if enough space isn't available. In addition, it should significantly
261# improve performance on writes to new blocks and may even improve I/O
262# performance to prewritten blocks due to reduced fragmentation.
263#
264# Possible values:
265#
266# * "none" => no storage provisioning is done up front
267# * "space" => storage is fully allocated at instance start
268# (string value)
269# Possible values:
270# none - <No description provided>
271# space - <No description provided>
272#preallocate_images = none
273{%- if compute.preallocate_images is defined %}
274preallocate_images = {{ compute.preallocate_images }}
275{%- endif %}
276
277#
278# Enable use of copy-on-write (cow) images.
279#
280# QEMU/KVM allow the use of qcow2 as backing files. By disabling this,
281# backing files will not be used.
282# (boolean value)
283#use_cow_images = true
284{%- if compute.image is defined and compute.image.use_cow is defined %}
285use_cow_images = {{ compute.image.use_cow }}
286{%- endif %}
287
288#
289# Force conversion of backing images to raw format.
290#
291# Possible values:
292#
293# * True: Backing image files will be converted to raw image format
294# * False: Backing image files will not be converted
295#
296# Related options:
297#
298# * ``compute_driver``: Only the libvirt driver uses this option.
299# (boolean value)
300force_raw_images = {{ compute.get('image', {}).get('force_raw', True)|lower }}
301
302#
303# Name of the mkfs commands for ephemeral device.
304#
305# The format is <os_type>=<mkfs command>
306# (multi valued)
307#virt_mkfs =
308
309#
310# Enable resizing of filesystems via a block device.
311#
312# If enabled, attempt to resize the filesystem by accessing the image over a
313# block device. This is done by the host and may not be necessary if the image
314# contains a recent version of cloud-init. Possible mechanisms require the nbd
315# driver (for qcow and raw), or loop (for raw).
316# (boolean value)
317#resize_fs_using_block_device = false
318
319# Amount of time, in seconds, to wait for NBD device start up. (integer value)
320# Minimum value: 0
321#timeout_nbd = 10
322{%- if compute.timeout_nbd is defined %}
323timeout_nbd = {{ compute.timeout_nbd }}
324{%- endif %}
325
326#
327# Location of cached images.
328#
329# This is NOT the full path - just a folder name relative to '$instances_path'.
330# For per-compute-host cached images, set to '_base_$my_ip'
331# (string value)
332#image_cache_subdirectory_name = _base
333
334# Should unused base images be removed? (boolean value)
335#remove_unused_base_images = true
336
337#
338# Unused unresized base images younger than this will not be removed.
339# (integer value)
340#remove_unused_original_minimum_age_seconds = 86400
341
342#
343# Generic property to specify the pointer type.
344#
345# Input devices allow interaction with a graphical framebuffer. For
346# example to provide a graphic tablet for absolute cursor movement.
347#
348# If set, the 'hw_pointer_model' image property takes precedence over
349# this configuration option.
350#
351# Possible values:
352#
353# * None: Uses default behavior provided by drivers (mouse on PS2 for
354# libvirt x86)
355# * ps2mouse: Uses relative movement. Mouse connected by PS2
356# * usbtablet: Uses absolute movement. Tablet connect by USB
357#
358# Related options:
359#
360# * usbtablet must be configured with VNC enabled or SPICE enabled and SPICE
361# agent disabled. When used with libvirt the instance mode should be
362# configured as HVM.
363# (string value)
364# Possible values:
365# <None> - <No description provided>
366# ps2mouse - <No description provided>
367# usbtablet - <No description provided>
368#pointer_model = usbtablet
369
370#
371# Defines which physical CPUs (pCPUs) can be used by instance
372# virtual CPUs (vCPUs).
373#
374# Possible values:
375#
376# * A comma-separated list of physical CPU numbers that virtual CPUs can be
377# allocated to by default. Each element should be either a single CPU number,
378# a range of CPU numbers, or a caret followed by a CPU number to be
379# excluded from a previous range. For example::
380#
381# vcpu_pin_set = "4-12,^8,15"
382# (string value)
383#vcpu_pin_set = <None>
384{%- if compute.vcpu_pin_set is defined %}
385vcpu_pin_set = {{ compute.vcpu_pin_set }}
386{%- endif %}
387
388#
389# Number of huge/large memory pages to reserved per NUMA host cell.
390#
391# Possible values:
392#
393# * A list of valid key=value which reflect NUMA node ID, page size
394# (Default unit is KiB) and number of pages to be reserved. For example::
395#
396# reserved_huge_pages = node:0,size:2048,count:64
397# reserved_huge_pages = node:1,size:1GB,count:1
398#
399# In this example we are reserving on NUMA node 0 64 pages of 2MiB
400# and on NUMA node 1 1 page of 1GiB.
401# (dict value)
402#reserved_huge_pages = <None>
403
404#
405# Amount of disk resources in MB to make them always available to host. The
406# disk usage gets reported back to the scheduler from nova-compute running
407# on the compute nodes. To prevent the disk resources from being considered
408# as available, this option can be used to reserve disk space for that host.
409#
410# Possible values:
411#
412# * Any positive integer representing amount of disk in MB to reserve
413# for the host.
414# (integer value)
415# Minimum value: 0
416#reserved_host_disk_mb = 0
417
418#
419# Amount of memory in MB to reserve for the host so that it is always available
420# to host processes. The host resources usage is reported back to the scheduler
421# continuously from nova-compute running on the compute node. To prevent the
422# host
423# memory from being considered as available, this option is used to reserve
424# memory for the host.
425#
426# Possible values:
427#
428# * Any positive integer representing amount of memory in MB to reserve
429# for the host.
430# (integer value)
431# Minimum value: 0
432reserved_host_memory_mb = {{ compute.get('reserved_host_memory_mb', '512') }}
433
434#
435# Number of physical CPUs to reserve for the host. The host resources usage is
436# reported back to the scheduler continuously from nova-compute running on the
437# compute node. To prevent the host CPU from being considered as available,
438# this option is used to reserve random pCPU(s) for the host.
439#
440# Possible values:
441#
442# * Any positive integer representing number of physical CPUs to reserve
443# for the host.
444# (integer value)
445# Minimum value: 0
446#reserved_host_cpus = 0
447
448#
449# This option helps you specify virtual CPU to physical CPU allocation ratio.
450#
451# From Ocata (15.0.0) this is used to influence the hosts selected by
452# the Placement API. Note that when Placement is used, the CoreFilter
453# is redundant, because the Placement API will have already filtered
454# out hosts that would have failed the CoreFilter.
455#
456# This configuration specifies ratio for CoreFilter which can be set
457# per compute node. For AggregateCoreFilter, it will fall back to this
458# configuration value if no per-aggregate setting is found.
459#
460# NOTE: This can be set per-compute, or if set to 0.0, the value
461# set on the scheduler node(s) or compute node(s) will be used
462# and defaulted to 16.0. Once set to a non-default value, it is not possible
463# to "unset" the config to get back to the default behavior. If you want
464# to reset back to the default, explicitly specify 16.0.
465#
466# NOTE: As of the 16.0.0 Pike release, this configuration option is ignored
467# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
468#
469# Possible values:
470#
471# * Any valid positive integer or float value
472# (floating point value)
473# Minimum value: 0
474{%- if compute.cpu_allocation_ratio is defined %}
475cpu_allocation_ratio = {{ compute.cpu_allocation_ratio }}
476{%- else %}
477#cpu_allocation_ratio = 0.0
478{%- endif %}
479
480#
481# This option helps you specify virtual RAM to physical RAM
482# allocation ratio.
483#
484# From Ocata (15.0.0) this is used to influence the hosts selected by
485# the Placement API. Note that when Placement is used, the RamFilter
486# is redundant, because the Placement API will have already filtered
487# out hosts that would have failed the RamFilter.
488#
489# This configuration specifies ratio for RamFilter which can be set
490# per compute node. For AggregateRamFilter, it will fall back to this
491# configuration value if no per-aggregate setting is found.
492#
493# NOTE: This can be set per-compute, or if set to 0.0, the value
494# set on the scheduler node(s) or compute node(s) will be used and
495# defaulted to 1.5. Once set to a non-default value, it is not possible
496# to "unset" the config to get back to the default behavior. If you want
497# to reset back to the default, explicitly specify 1.5.
498#
499# NOTE: As of the 16.0.0 Pike release, this configuration option is ignored
500# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
501#
502# Possible values:
503#
504# * Any valid positive integer or float value
505# (floating point value)
506# Minimum value: 0
507{%- if compute.ram_allocation_ratio is defined %}
508ram_allocation_ratio = {{ compute.ram_allocation_ratio }}
509{%- else %}
510#ram_allocation_ratio = 0.0
511{%- endif %}
512
513#
514# This option helps you specify virtual disk to physical disk
515# allocation ratio.
516#
517# From Ocata (15.0.0) this is used to influence the hosts selected by
518# the Placement API. Note that when Placement is used, the DiskFilter
519# is redundant, because the Placement API will have already filtered
520# out hosts that would have failed the DiskFilter.
521#
522# A ratio greater than 1.0 will result in over-subscription of the
523# available physical disk, which can be useful for more
524# efficiently packing instances created with images that do not
525# use the entire virtual disk, such as sparse or compressed
526# images. It can be set to a value between 0.0 and 1.0 in order
527# to preserve a percentage of the disk for uses other than
528# instances.
529#
530# NOTE: This can be set per-compute, or if set to 0.0, the value
531# set on the scheduler node(s) or compute node(s) will be used and
532# defaulted to 1.0. Once set to a non-default value, it is not possible
533# to "unset" the config to get back to the default behavior. If you want
534# to reset back to the default, explicitly specify 1.0.
535#
536# NOTE: As of the 16.0.0 Pike release, this configuration option is ignored
537# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
538#
539# Possible values:
540#
541# * Any valid positive integer or float value
542# (floating point value)
543# Minimum value: 0
544{%- if compute.disk_allocation_ratio is defined %}
545disk_allocation_ratio = {{ compute.disk_allocation_ratio }}
546{%- else %}
547#disk_allocation_ratio = 0.0
548{%- endif %}
549
550#
551# Console proxy host to be used to connect to instances on this host. It is the
552# publicly visible name for the console host.
553#
554# Possible values:
555#
556# * Current hostname (default) or any string representing hostname.
557# (string value)
558#console_host = <current_hostname>
559
560#
561# Name of the network to be used to set access IPs for instances. If there are
562# multiple IPs to choose from, an arbitrary one will be chosen.
563#
564# Possible values:
565#
566# * None (default)
567# * Any string representing network name.
568# (string value)
569#default_access_ip_network_name = <None>
570
571#
572# Whether to batch up the application of IPTables rules during a host restart
573# and apply all at the end of the init phase.
574# (boolean value)
575#defer_iptables_apply = false
576
577#
578# Specifies where instances are stored on the hypervisor's disk.
579# It can point to locally attached storage or a directory on NFS.
580#
581# Possible values:
582#
583# * $state_path/instances where state_path is a config option that specifies
584# the top-level directory for maintaining nova's state. (default) or
585# Any string representing directory path.
586# (string value)
587instances_path = {{ compute.instances_path }}
588
589#
590# This option enables periodic compute.instance.exists notifications. Each
591# compute node must be configured to generate system usage data. These
592# notifications are consumed by OpenStack Telemetry service.
593# (boolean value)
594#instance_usage_audit = false
595{% if compute.instance_usage_audit is defined %}
596instance_usage_audit = {{ compute.instance_usage_audit }}
597{%- endif %}
598
599#
600# Maximum number of 1 second retries in live_migration. It specifies number
601# of retries to iptables when it complains. It happens when a user continuously
602# sends live-migration request to same host leading to concurrent request
603# to iptables.
604#
605# Possible values:
606#
607# * Any positive integer representing retry count.
608# (integer value)
609# Minimum value: 0
610#live_migration_retry_count = 30
611
612#
613# This option specifies whether to start guests that were running before the
614# host rebooted. It ensures that all of the instances on a Nova compute node
615# resume their state each time the compute node boots or restarts.
616# (boolean value)
617resume_guests_state_on_host_boot = {{ compute.get('resume_guests_state_on_host_boot', true) }}
618
619#
620# Number of times to retry network allocation. It is required to attempt network
621# allocation retries if the virtual interface plug fails.
622#
623# Possible values:
624#
625# * Any positive integer representing retry count.
626# (integer value)
627# Minimum value: 0
628#network_allocate_retries = 0
629
630#
631# Limits the maximum number of instance builds to run concurrently by
632# nova-compute. Compute service can attempt to build an infinite number of
633# instances, if asked to do so. This limit is enforced to avoid building
634# unlimited instance concurrently on a compute node. This value can be set
635# per compute node.
636#
637# Possible Values:
638#
639# * 0 : treated as unlimited.
640# * Any positive integer representing maximum concurrent builds.
641# (integer value)
642# Minimum value: 0
643#max_concurrent_builds = 10
644
645#
646# Maximum number of live migrations to run concurrently. This limit is enforced
647# to avoid outbound live migrations overwhelming the host/network and causing
648# failures. It is not recommended that you change this unless you are very sure
649# that doing so is safe and stable in your environment.
650#
651# Possible values:
652#
653# * 0 : treated as unlimited.
654# * Negative value defaults to 0.
655# * Any positive integer representing maximum number of live migrations
656# to run concurrently.
657# (integer value)
658#max_concurrent_live_migrations = 1
659{%- if compute.max_concurrent_live_migrations is defined %}
660max_concurrent_live_migrations = {{ compute.max_concurrent_live_migrations }}
661{%- endif %}
662
663#
664# Number of times to retry block device allocation on failures. Starting with
665# Liberty, Cinder can use image volume cache. This may help with block device
666# allocation performance. Look at the cinder image_volume_cache_enabled
667# configuration option.
668#
669# Possible values:
670#
671# * 60 (default)
672# * If value is 0, then one attempt is made.
673# * Any negative value is treated as 0.
674# * For any value > 0, total attempts are (value + 1)
675# (integer value)
676block_device_allocate_retries = {{ compute.get('block_device_allocate_retries', '600') }}
677
678#
679# Number of greenthreads available for use to sync power states.
680#
681# This option can be used to reduce the number of concurrent requests
682# made to the hypervisor or system with real instance power states
683# for performance reasons, for example, with Ironic.
684#
685# Possible values:
686#
687# * Any positive integer representing greenthreads count.
688# (integer value)
689#sync_power_state_pool_size = 1000
690
691#
692# Number of seconds to wait between runs of the image cache manager.
693#
694# Possible values:
695# * 0: run at the default rate.
696# * -1: disable
697# * Any other value
698# (integer value)
699# Minimum value: -1
700#image_cache_manager_interval = 2400
701
702#
703# Interval to pull network bandwidth usage info.
704#
705# Not supported on all hypervisors. If a hypervisor doesn't support bandwidth
706# usage, it will not get the info in the usage events.
707#
708# Possible values:
709#
710# * 0: Will run at the default periodic interval.
711# * Any value < 0: Disables the option.
712# * Any positive integer in seconds.
713# (integer value)
714#bandwidth_poll_interval = 600
715
716#
717# Interval to sync power states between the database and the hypervisor.
718#
719# The interval that Nova checks the actual virtual machine power state
720# and the power state that Nova has in its database. If a user powers
721# down their VM, Nova updates the API to report the VM has been
722# powered down. Should something turn on the VM unexpectedly,
723# Nova will turn the VM back off to keep the system in the expected
724# state.
725#
726# Possible values:
727#
728# * 0: Will run at the default periodic interval.
729# * Any value < 0: Disables the option.
730# * Any positive integer in seconds.
731#
732# Related options:
733#
734# * If ``handle_virt_lifecycle_events`` in workarounds_group is
735# false and this option is negative, then instances that get out
736# of sync between the hypervisor and the Nova database will have
737# to be synchronized manually.
738# (integer value)
739#sync_power_state_interval = 600
740
741#
742# Interval between instance network information cache updates.
743#
744# Number of seconds after which each compute node runs the task of
745# querying Neutron for all of its instances networking information,
746# then updates the Nova db with that information. Nova will never
747# update it's cache if this option is set to 0. If we don't update the
748# cache, the metadata service and nova-api endpoints will be proxying
749# incorrect network data about the instance. So, it is not recommended
750# to set this option to 0.
751#
752# Possible values:
753#
754# * Any positive integer in seconds.
755# * Any value <=0 will disable the sync. This is not recommended.
756# (integer value)
757heal_instance_info_cache_interval = {{ compute.heal_instance_info_cache_interval }}
758
759#
760# Interval for reclaiming deleted instances.
761#
762# A value greater than 0 will enable SOFT_DELETE of instances.
763# This option decides whether the server to be deleted will be put into
764# the SOFT_DELETED state. If this value is greater than 0, the deleted
765# server will not be deleted immediately, instead it will be put into
766# a queue until it's too old (deleted time greater than the value of
767# reclaim_instance_interval). The server can be recovered from the
768# delete queue by using the restore action. If the deleted server remains
769# longer than the value of reclaim_instance_interval, it will be
770# deleted by a periodic task in the compute service automatically.
771#
772# Note that this option is read from both the API and compute nodes, and
773# must be set globally otherwise servers could be put into a soft deleted
774# state in the API and never actually reclaimed (deleted) on the compute
775# node.
776#
777# Possible values:
778#
779# * Any positive integer(in seconds) greater than 0 will enable
780# this option.
781# * Any value <=0 will disable the option.
782# (integer value)
783#reclaim_instance_interval = 0
784{%- if compute.reclaim_instance_interval is defined %}
785reclaim_instance_interval = {{ compute.reclaim_instance_interval }}
786{%- endif %}
787
788#
789# Interval for gathering volume usages.
790#
791# This option updates the volume usage cache for every
792# volume_usage_poll_interval number of seconds.
793#
794# Possible values:
795#
796# * Any positive integer(in seconds) greater than 0 will enable
797# this option.
798# * Any value <=0 will disable the option.
799# (integer value)
800#volume_usage_poll_interval = 0
801
802#
803# Interval for polling shelved instances to offload.
804#
805# The periodic task runs for every shelved_poll_interval number
806# of seconds and checks if there are any shelved instances. If it
807# finds a shelved instance, based on the 'shelved_offload_time' config
808# value it offloads the shelved instances. Check 'shelved_offload_time'
809# config option description for details.
810#
811# Possible values:
812#
813# * Any value <= 0: Disables the option.
814# * Any positive integer in seconds.
815#
816# Related options:
817#
818# * ``shelved_offload_time``
819# (integer value)
820#shelved_poll_interval = 3600
821
822#
823# Time before a shelved instance is eligible for removal from a host.
824#
825# By default this option is set to 0 and the shelved instance will be
826# removed from the hypervisor immediately after shelve operation.
827# Otherwise, the instance will be kept for the value of
828# shelved_offload_time(in seconds) so that during the time period the
829# unshelve action will be faster, then the periodic task will remove
830# the instance from hypervisor after shelved_offload_time passes.
831#
832# Possible values:
833#
834# * 0: Instance will be immediately offloaded after being
835# shelved.
836# * Any value < 0: An instance will never offload.
837# * Any positive integer in seconds: The instance will exist for
838# the specified number of seconds before being offloaded.
839# (integer value)
840#shelved_offload_time = 0
841
842#
843# Interval for retrying failed instance file deletes.
844#
845# This option depends on 'maximum_instance_delete_attempts'.
846# This option specifies how often to retry deletes whereas
847# 'maximum_instance_delete_attempts' specifies the maximum number
848# of retry attempts that can be made.
849#
850# Possible values:
851#
852# * 0: Will run at the default periodic interval.
853# * Any value < 0: Disables the option.
854# * Any positive integer in seconds.
855#
856# Related options:
857#
858# * ``maximum_instance_delete_attempts`` from instance_cleaning_opts
859# group.
860# (integer value)
861#instance_delete_interval = 300
862
863#
864# Interval (in seconds) between block device allocation retries on failures.
865#
866# This option allows the user to specify the time interval between
867# consecutive retries. 'block_device_allocate_retries' option specifies
868# the maximum number of retries.
869#
870# Possible values:
871#
872# * 0: Disables the option.
873# * Any positive integer in seconds enables the option.
874#
875# Related options:
876#
877# * ``block_device_allocate_retries`` in compute_manager_opts group.
878# (integer value)
879# Minimum value: 0
880block_device_allocate_retries_interval = {{ compute.get('block_device_allocate_retries_interval', '10') }}
881
882#
883# Interval between sending the scheduler a list of current instance UUIDs to
884# verify that its view of instances is in sync with nova.
885#
886# If the CONF option 'scheduler_tracks_instance_changes' is
887# False, the sync calls will not be made. So, changing this option will
888# have no effect.
889#
890# If the out of sync situations are not very common, this interval
891# can be increased to lower the number of RPC messages being sent.
892# Likewise, if sync issues turn out to be a problem, the interval
893# can be lowered to check more frequently.
894#
895# Possible values:
896#
897# * 0: Will run at the default periodic interval.
898# * Any value < 0: Disables the option.
899# * Any positive integer in seconds.
900#
901# Related options:
902#
903# * This option has no impact if ``scheduler_tracks_instance_changes``
904# is set to False.
905# (integer value)
906#scheduler_instance_sync_interval = 120
907
908#
909# Interval for updating compute resources.
910#
911# This option specifies how often the update_available_resources
912# periodic task should run. A number less than 0 means to disable the
913# task completely. Leaving this at the default of 0 will cause this to
914# run at the default periodic interval. Setting it to any positive
915# value will cause it to run at approximately that number of seconds.
916#
917# Possible values:
918#
919# * 0: Will run at the default periodic interval.
920# * Any value < 0: Disables the option.
921# * Any positive integer in seconds.
922# (integer value)
923#update_resources_interval = 0
924
925#
926# Time interval after which an instance is hard rebooted automatically.
927#
928# When doing a soft reboot, it is possible that a guest kernel is
929# completely hung in a way that causes the soft reboot task
930# to not ever finish. Setting this option to a time period in seconds
931# will automatically hard reboot an instance if it has been stuck
932# in a rebooting state longer than N seconds.
933#
934# Possible values:
935#
936# * 0: Disables the option (default).
937# * Any positive integer in seconds: Enables the option.
938# (integer value)
939# Minimum value: 0
940#reboot_timeout = 0
941
942#
943# Maximum time in seconds that an instance can take to build.
944#
945# If this timer expires, instance status will be changed to ERROR.
946# Enabling this option will make sure an instance will not be stuck
947# in BUILD state for a longer period.
948#
949# Possible values:
950#
951# * 0: Disables the option (default)
952# * Any positive integer in seconds: Enables the option.
953# (integer value)
954# Minimum value: 0
955#instance_build_timeout = 0
956
957#
958# Interval to wait before un-rescuing an instance stuck in RESCUE.
959#
960# Possible values:
961#
962# * 0: Disables the option (default)
963# * Any positive integer in seconds: Enables the option.
964# (integer value)
965# Minimum value: 0
966#rescue_timeout = 0
967
968#
969# Automatically confirm resizes after N seconds.
970#
971# Resize functionality will save the existing server before resizing.
972# After the resize completes, user is requested to confirm the resize.
973# The user has the opportunity to either confirm or revert all
974# changes. Confirm resize removes the original server and changes
975# server status from resized to active. Setting this option to a time
976# period (in seconds) will automatically confirm the resize if the
977# server is in resized state longer than that time.
978#
979# Possible values:
980#
981# * 0: Disables the option (default)
982# * Any positive integer in seconds: Enables the option.
983# (integer value)
984# Minimum value: 0
985#resize_confirm_window = 0
986
987#
988# Total time to wait in seconds for an instance to perform a clean
989# shutdown.
990#
991# It determines the overall period (in seconds) a VM is allowed to
992# perform a clean shutdown. While performing stop, rescue and shelve,
993# rebuild operations, configuring this option gives the VM a chance
994# to perform a controlled shutdown before the instance is powered off.
995# The default timeout is 60 seconds.
996#
997# The timeout value can be overridden on a per image basis by means
998# of os_shutdown_timeout that is an image metadata setting allowing
999# different types of operating systems to specify how much time they
1000# need to shut down cleanly.
1001#
1002# Possible values:
1003#
1004# * Any positive integer in seconds (default value is 60).
1005# (integer value)
1006# Minimum value: 1
1007#shutdown_timeout = 60
1008
1009#
1010# The compute service periodically checks for instances that have been
1011# deleted in the database but remain running on the compute node. The
1012# above option enables action to be taken when such instances are
1013# identified.
1014#
1015# Possible values:
1016#
1017# * reap: Powers down the instances and deletes them (default)
1018# * log: Logs warning message about deletion of the resource
1019# * shutdown: Powers down instances and marks them as non-
1020# bootable which can be later used for debugging/analysis
1021# * noop: Takes no action
1022#
1023# Related options:
1024#
1025# * running_deleted_instance_poll_interval
1026# * running_deleted_instance_timeout
1027# (string value)
1028# Possible values:
1029# noop - <No description provided>
1030# log - <No description provided>
1031# shutdown - <No description provided>
1032# reap - <No description provided>
1033#running_deleted_instance_action = reap
1034
1035#
1036# Time interval in seconds to wait between runs for the clean up action.
1037# If set to 0, above check will be disabled. If "running_deleted_instance
1038# _action" is set to "log" or "reap", a value greater than 0 must be set.
1039#
1040# Possible values:
1041#
1042# * Any positive integer in seconds enables the option.
1043# * 0: Disables the option.
1044# * 1800: Default value.
1045#
1046# Related options:
1047#
1048# * running_deleted_instance_action
1049# (integer value)
1050#running_deleted_instance_poll_interval = 1800
1051
1052#
1053# Time interval in seconds to wait for the instances that have
1054# been marked as deleted in database to be eligible for cleanup.
1055#
1056# Possible values:
1057#
1058# * Any positive integer in seconds (default is 0).
1059#
1060# Related options:
1061#
1062# * "running_deleted_instance_action"
1063# (integer value)
1064#running_deleted_instance_timeout = 0
1065
1066#
1067# The number of times to attempt to reap an instance's files.
1068#
1069# This option specifies the maximum number of retry attempts
1070# that can be made.
1071#
1072# Possible values:
1073#
1074# * Any positive integer defines how many attempts are made.
1075# * Any value <=0 means no delete attempts occur, but you should use
1076# ``instance_delete_interval`` to disable the delete attempts.
1077#
1078# Related options:
1079#
1080# * ``[DEFAULT] instance_delete_interval`` can be used to disable this option.
1081# (integer value)
1082#maximum_instance_delete_attempts = 5
1083
1084#
1085# Sets the scope of the check for unique instance names.
1086#
1087# The default doesn't check for unique names. If a scope for the name check is
1088# set, a launch of a new instance or an update of an existing instance with a
1089# duplicate name will result in an ''InstanceExists'' error. The uniqueness is
1090# case-insensitive. Setting this option can increase the usability for end
1091# users as they don't have to distinguish among instances with the same name
1092# by their IDs.
1093#
1094# Possible values:
1095#
1096# * '': An empty value means that no uniqueness check is done and duplicate
1097# names are possible.
1098# * "project": The instance name check is done only for instances within the
1099# same project.
1100# * "global": The instance name check is done for all instances regardless of
1101# the project.
1102# (string value)
1103# Possible values:
1104# '' - <No description provided>
1105# project - <No description provided>
1106# global - <No description provided>
1107#osapi_compute_unique_server_name_scope =
1108
1109#
1110# Enable new nova-compute services on this host automatically.
1111#
1112# When a new nova-compute service starts up, it gets
1113# registered in the database as an enabled service. Sometimes it can be useful
1114# to register new compute services in disabled state and then enabled them at a
1115# later point in time. This option only sets this behavior for nova-compute
1116# services, it does not auto-disable other services like nova-conductor,
1117# nova-scheduler, nova-consoleauth, or nova-osapi_compute.
1118#
1119# Possible values:
1120#
1121# * ``True``: Each new compute service is enabled as soon as it registers
1122# itself.
1123# * ``False``: Compute services must be enabled via an os-services REST API call
1124# or with the CLI with ``nova service-enable <hostname> <binary>``, otherwise
1125# they are not ready to use.
1126# (boolean value)
1127#enable_new_services = true
1128
1129#
1130# Template string to be used to generate instance names.
1131#
1132# This template controls the creation of the database name of an instance. This
1133# is *not* the display name you enter when creating an instance (via Horizon
1134# or CLI). For a new deployment it is advisable to change the default value
1135# (which uses the database autoincrement) to another value which makes use
1136# of the attributes of an instance, like ``instance-%(uuid)s``. If you
1137# already have instances in your deployment when you change this, your
1138# deployment will break.
1139#
1140# Possible values:
1141#
1142# * A string which either uses the instance database ID (like the
1143# default)
1144# * A string with a list of named database columns, for example ``%(id)d``
1145# or ``%(uuid)s`` or ``%(hostname)s``.
1146# (string value)
1147#instance_name_template = instance-%08x
1148
1149#
1150# Number of times to retry live-migration before failing.
1151#
1152# Possible values:
1153#
1154# * If == -1, try until out of hosts (default)
1155# * If == 0, only try once, no retries
1156# * Integer greater than 0
1157# (integer value)
1158# Minimum value: -1
1159#migrate_max_retries = -1
1160
1161#
1162# Configuration drive format
1163#
1164# Configuration drive format that will contain metadata attached to the
1165# instance when it boots.
1166#
1167# Possible values:
1168#
1169# * iso9660: A file system image standard that is widely supported across
1170# operating systems. NOTE: Mind the libvirt bug
1171# (https://bugs.launchpad.net/nova/+bug/1246201) - If your hypervisor
1172# driver is libvirt, and you want live migrate to work without shared storage,
1173# then use VFAT.
1174# * vfat: For legacy reasons, you can configure the configuration drive to
1175# use VFAT format instead of ISO 9660.
1176#
1177# Related options:
1178#
1179# * This option is meaningful when one of the following alternatives occur:
1180#
1181# 1. ``force_config_drive`` option set to ``true``
1182# 2. the REST API call to create the instance contains an enable flag for
1183# config drive option
1184# 3. the image used to create the instance requires a config drive,
1185# this is defined by ``img_config_drive`` property for that image.
1186#
1187# * A compute node running Hyper-V hypervisor can be configured to attach
1188# configuration drive as a CD drive. To attach the configuration drive as a CD
1189# drive, set the ``[hyperv] config_drive_cdrom`` option to true.
1190# (string value)
1191# Possible values:
1192# iso9660 - <No description provided>
1193# vfat - <No description provided>
1194config_drive_format = {{ compute.get('config_drive_format', compute.get('config_drive', {}).get('format', 'vfat')) }}
1195
1196#
1197# Force injection to take place on a config drive
1198#
1199# When this option is set to true configuration drive functionality will be
1200# forced enabled by default, otherwise user can still enable configuration
1201# drives via the REST API or image metadata properties.
1202#
1203# Possible values:
1204#
1205# * True: Force the use of configuration drive regardless of the user's input in the
1206# REST API call.
1207# * False: Do not force use of configuration drive. Config drives can still be
1208# enabled via the REST API or image metadata properties.
1209#
1210# Related options:
1211#
1212# * Use the 'mkisofs_cmd' flag to set the path where you install the
1213# genisoimage program. If genisoimage is in same path as the
1214# nova-compute service, you do not need to set this flag.
1215# * To use configuration drive with Hyper-V, you must set the
1216# 'mkisofs_cmd' value to the full path to an mkisofs.exe installation.
1217# Additionally, you must set the qemu_img_cmd value in the hyperv
1218# configuration section to the full path to a qemu-img command
1219# installation.
1220# (boolean value)
1221force_config_drive = {{ compute.get('config_drive', {}).get('forced', True)|lower }}
1222
1223#
1224# Name or path of the tool used for ISO image creation
1225#
1226# Use the mkisofs_cmd flag to set the path where you install the genisoimage
1227# program. If genisoimage is on the system path, you do not need to change
1228# the default value.
1229#
1230# To use configuration drive with Hyper-V, you must set the mkisofs_cmd value
1231# to the full path to an mkisofs.exe installation. Additionally, you must set
1232# the qemu_img_cmd value in the hyperv configuration section to the full path
1233# to a qemu-img command installation.
1234#
1235# Possible values:
1236#
1237# * Name of the ISO image creator program, in case it is in the same directory
1238# as the nova-compute service
1239# * Path to ISO image creator program
1240#
1241# Related options:
1242#
1243# * This option is meaningful when config drives are enabled.
1244# * To use configuration drive with Hyper-V, you must set the qemu_img_cmd
1245# value in the hyperv configuration section to the full path to a qemu-img
1246# command installation.
1247# (string value)
1248#mkisofs_cmd = genisoimage
1249
1250# DEPRECATED:
1251# Default flavor to use for the EC2 API only.
1252# The Nova API does not support a default flavor.
1253# (string value)
1254# This option is deprecated for removal since 14.0.0.
1255# Its value may be silently ignored in the future.
1256# Reason: The EC2 API is deprecated.
1257#default_flavor = m1.small
1258
1259#
1260# The IP address which the host is using to connect to the management network.
1261#
1262# Possible values:
1263#
1264# * String with valid IP address. Default is IPv4 address of this host.
1265#
1266# Related options:
1267#
1268# * metadata_host
1269# * my_block_storage_ip
1270# * routing_source_ip
1271# * vpn_ip
1272# (string value)
1273#my_ip = <host_ipv4>
1274{%- if compute.my_ip is defined %}
1275my_ip = {{ compute.my_ip }}
1276{%- endif %}
1277
1278#
1279# The IP address which is used to connect to the block storage network.
1280#
1281# Possible values:
1282#
1283# * String with valid IP address. Default is IP address of this host.
1284#
1285# Related options:
1286#
1287# * my_ip - if my_block_storage_ip is not set, then my_ip value is used.
1288# (string value)
1289#my_block_storage_ip = $my_ip
1290
1291#
1292# Hostname, FQDN or IP address of this host.
1293#
1294# Used as:
1295#
1296# * the oslo.messaging queue name for nova-compute worker
1297# * we use this value for the binding_host sent to neutron. This means if you
1298# use
1299# a neutron agent, it should have the same value for host.
1300# * cinder host attachment information
1301#
1302# Must be valid within AMQP key.
1303#
1304# Possible values:
1305#
1306# * String with hostname, FQDN or IP address. Default is hostname of this host.
1307# (string value)
1308#host = <current_hostname>
1309{%- if compute.host is defined %}
1310host = {{ compute.host }}
1311{%- endif %}
1312
1313# DEPRECATED:
1314# This option is a list of full paths to one or more configuration files for
1315# dhcpbridge. In most cases the default path of '/etc/nova/nova-dhcpbridge.conf'
1316# should be sufficient, but if you have special needs for configuring
1317# dhcpbridge,
1318# you can change or add to this list.
1319#
1320# Possible values
1321#
1322# * A list of strings, where each string is the full path to a dhcpbridge
1323# configuration file.
1324# (multi valued)
1325# This option is deprecated for removal since 16.0.0.
1326# Its value may be silently ignored in the future.
1327# Reason:
1328# nova-network is deprecated, as are any related configuration options.
1329#dhcpbridge_flagfile = /etc/nova/nova-dhcpbridge.conf
1330
1331# DEPRECATED:
1332# The location where the network configuration files will be kept. The default
1333# is
1334# the 'networks' directory off of the location where nova's Python module is
1335# installed.
1336#
1337# Possible values
1338#
1339# * A string containing the full path to the desired configuration directory
1340# (string value)
1341# This option is deprecated for removal since 16.0.0.
1342# Its value may be silently ignored in the future.
1343# Reason:
1344# nova-network is deprecated, as are any related configuration options.
1345#networks_path = $state_path/networks
1346
1347# DEPRECATED:
1348# This is the name of the network interface for public IP addresses. The default
1349# is 'eth0'.
1350#
1351# Possible values:
1352#
1353# * Any string representing a network interface name
1354# (string value)
1355# This option is deprecated for removal since 16.0.0.
1356# Its value may be silently ignored in the future.
1357# Reason:
1358# nova-network is deprecated, as are any related configuration options.
1359#public_interface = eth0
1360
1361# DEPRECATED:
1362# The location of the binary nova-dhcpbridge. By default it is the binary named
1363# 'nova-dhcpbridge' that is installed with all the other nova binaries.
1364#
1365# Possible values:
1366#
1367# * Any string representing the full path to the binary for dhcpbridge
1368# (string value)
1369# This option is deprecated for removal since 16.0.0.
1370# Its value may be silently ignored in the future.
1371# Reason:
1372# nova-network is deprecated, as are any related configuration options.
1373#dhcpbridge = $bindir/nova-dhcpbridge
1374
1375# DEPRECATED:
1376# The public IP address of the network host.
1377#
1378# This is used when creating an SNAT rule.
1379#
1380# Possible values:
1381#
1382# * Any valid IP address
1383#
1384# Related options:
1385#
1386# * ``force_snat_range``
1387# (string value)
1388# This option is deprecated for removal since 16.0.0.
1389# Its value may be silently ignored in the future.
1390# Reason:
1391# nova-network is deprecated, as are any related configuration options.
1392#routing_source_ip = $my_ip
1393
1394# DEPRECATED:
1395# The lifetime of a DHCP lease, in seconds. The default is 86400 (one day).
1396#
1397# Possible values:
1398#
1399# * Any positive integer value.
1400# (integer value)
1401# Minimum value: 1
1402# This option is deprecated for removal since 16.0.0.
1403# Its value may be silently ignored in the future.
1404# Reason:
1405# nova-network is deprecated, as are any related configuration options.
1406#dhcp_lease_time = 86400
1407
1408# DEPRECATED:
1409# Despite the singular form of the name of this option, it is actually a list of
1410# zero or more server addresses that dnsmasq will use for DNS nameservers. If
1411# this is not empty, dnsmasq will not read /etc/resolv.conf, but will only use
1412# the servers specified in this option. If the option use_network_dns_servers is
1413# True, the dns1 and dns2 servers from the network will be appended to this
1414# list,
1415# and will be used as DNS servers, too.
1416#
1417# Possible values:
1418#
1419# * A list of strings, where each string is either an IP address or a FQDN.
1420#
1421# Related options:
1422#
1423# * ``use_network_dns_servers``
1424# (multi valued)
1425# This option is deprecated for removal since 16.0.0.
1426# Its value may be silently ignored in the future.
1427# Reason:
1428# nova-network is deprecated, as are any related configuration options.
1429#dns_server =
1430
1431# DEPRECATED:
1432# When this option is set to True, the dns1 and dns2 servers for the network
1433# specified by the user on boot will be used for DNS, as well as any specified
1434# in
1435# the `dns_server` option.
1436#
1437# Related options:
1438#
1439# * ``dns_server``
1440# (boolean value)
1441# This option is deprecated for removal since 16.0.0.
1442# Its value may be silently ignored in the future.
1443# Reason:
1444# nova-network is deprecated, as are any related configuration options.
1445#use_network_dns_servers = false
1446
1447# DEPRECATED:
1448# This option is a list of zero or more IP address ranges in your network's DMZ
1449# that should be accepted.
1450#
1451# Possible values:
1452#
1453# * A list of strings, each of which should be a valid CIDR.
1454# (list value)
1455# This option is deprecated for removal since 16.0.0.
1456# Its value may be silently ignored in the future.
1457# Reason:
1458# nova-network is deprecated, as are any related configuration options.
1459#dmz_cidr =
1460
1461# DEPRECATED:
1462# This is a list of zero or more IP ranges that traffic from the
1463# `routing_source_ip` will be SNATted to. If the list is empty, then no SNAT
1464# rules are created.
1465#
1466# Possible values:
1467#
1468# * A list of strings, each of which should be a valid CIDR.
1469#
1470# Related options:
1471#
1472# * ``routing_source_ip``
1473# (multi valued)
1474# This option is deprecated for removal since 16.0.0.
1475# Its value may be silently ignored in the future.
1476# Reason:
1477# nova-network is deprecated, as are any related configuration options.
1478#force_snat_range =
1479
1480# DEPRECATED:
1481# The path to the custom dnsmasq configuration file, if any.
1482#
1483# Possible values:
1484#
1485# * The full path to the configuration file, or an empty string if there is no
1486# custom dnsmasq configuration file.
1487# (string value)
1488# This option is deprecated for removal since 16.0.0.
1489# Its value may be silently ignored in the future.
1490# Reason:
1491# nova-network is deprecated, as are any related configuration options.
1492#dnsmasq_config_file =
1493
1494# DEPRECATED:
1495# This is the class used as the ethernet device driver for linuxnet bridge
1496# operations. The default value should be all you need for most cases, but if
1497# you
1498# wish to use a customized class, set this option to the full dot-separated
1499# import path for that class.
1500#
1501# Possible values:
1502#
1503# * Any string representing a dot-separated class path that Nova can import.
1504# (string value)
1505# This option is deprecated for removal since 16.0.0.
1506# Its value may be silently ignored in the future.
1507# Reason:
1508# nova-network is deprecated, as are any related configuration options.
1509#linuxnet_interface_driver = nova.network.linux_net.LinuxBridgeInterfaceDriver
1510
1511# DEPRECATED:
1512# The name of the Open vSwitch bridge that is used with linuxnet when connecting
1513# with Open vSwitch.
1514#
1515# Possible values:
1516#
1517# * Any string representing a valid bridge name.
1518# (string value)
1519# This option is deprecated for removal since 16.0.0.
1520# Its value may be silently ignored in the future.
1521# Reason:
1522# nova-network is deprecated, as are any related configuration options.
1523#linuxnet_ovs_integration_bridge = br-int
1524
1525#
1526# When True, when a device starts up, and upon binding floating IP addresses,
1527# arp
1528# messages will be sent to ensure that the arp caches on the compute hosts are
1529# up-to-date.
1530#
1531# Related options:
1532#
1533# * ``send_arp_for_ha_count``
1534# (boolean value)
1535#send_arp_for_ha = false
1536
1537#
1538# When arp messages are configured to be sent, they will be sent with the count
1539# set to the value of this option. Of course, if this is set to zero, no arp
1540# messages will be sent.
1541#
1542# Possible values:
1543#
1544# * Any integer greater than or equal to 0
1545#
1546# Related options:
1547#
1548# * ``send_arp_for_ha``
1549# (integer value)
1550#send_arp_for_ha_count = 3
1551
1552# DEPRECATED:
1553# When set to True, only the first NIC of a VM will get its default gateway from
1554# the DHCP server.
1555# (boolean value)
1556# This option is deprecated for removal since 16.0.0.
1557# Its value may be silently ignored in the future.
1558# Reason:
1559# nova-network is deprecated, as are any related configuration options.
1560#use_single_default_gateway = false
1561
1562# DEPRECATED:
1563# One or more interfaces that bridges can forward traffic to. If any of the
1564# items
1565# in this list is the special keyword 'all', then all traffic will be forwarded.
1566#
1567# Possible values:
1568#
1569# * A list of zero or more interface names, or the word 'all'.
1570# (multi valued)
1571# This option is deprecated for removal since 16.0.0.
1572# Its value may be silently ignored in the future.
1573# Reason:
1574# nova-network is deprecated, as are any related configuration options.
1575#forward_bridge_interface = all
1576
1577#
1578# This option determines the IP address for the network metadata API server.
1579#
1580# This is really the client side of the metadata host equation that allows
1581# nova-network to find the metadata server when doing a default multi host
1582# networking.
1583#
1584# Possible values:
1585#
1586# * Any valid IP address. The default is the address of the Nova API server.
1587#
1588# Related options:
1589#
1590# * ``metadata_port``
1591# (string value)
1592#metadata_host = $my_ip
1593
1594# DEPRECATED:
1595# This option determines the port used for the metadata API server.
1596#
1597# Related options:
1598#
1599# * ``metadata_host``
1600# (port value)
1601# Minimum value: 0
1602# Maximum value: 65535
1603# This option is deprecated for removal since 16.0.0.
1604# Its value may be silently ignored in the future.
1605# Reason:
1606# nova-network is deprecated, as are any related configuration options.
1607#metadata_port = 8775
1608
1609# DEPRECATED:
1610# This expression, if defined, will select any matching iptables rules and place
1611# them at the top when applying metadata changes to the rules.
1612#
1613# Possible values:
1614#
1615# * Any string representing a valid regular expression, or an empty string
1616#
1617# Related options:
1618#
1619# * ``iptables_bottom_regex``
1620# (string value)
1621# This option is deprecated for removal since 16.0.0.
1622# Its value may be silently ignored in the future.
1623# Reason:
1624# nova-network is deprecated, as are any related configuration options.
1625#iptables_top_regex =
1626
1627# DEPRECATED:
1628# This expression, if defined, will select any matching iptables rules and place
1629# them at the bottom when applying metadata changes to the rules.
1630#
1631# Possible values:
1632#
1633# * Any string representing a valid regular expression, or an empty string
1634#
1635# Related options:
1636#
1637# * iptables_top_regex
1638# (string value)
1639# This option is deprecated for removal since 16.0.0.
1640# Its value may be silently ignored in the future.
1641# Reason:
1642# nova-network is deprecated, as are any related configuration options.
1643#iptables_bottom_regex =
1644
1645# DEPRECATED:
1646# By default, packets that do not pass the firewall are DROPped. In many cases,
1647# though, an operator may find it more useful to change this from DROP to
1648# REJECT,
1649# so that the user issuing those packets may have a better idea as to what's
1650# going on, or LOGDROP in order to record the blocked traffic before DROPping.
1651#
1652# Possible values:
1653#
1654# * A string representing an iptables chain. The default is DROP.
1655# (string value)
1656# This option is deprecated for removal since 16.0.0.
1657# Its value may be silently ignored in the future.
1658# Reason:
1659# nova-network is deprecated, as are any related configuration options.
1660#iptables_drop_action = DROP
1661
1662# DEPRECATED:
1663# This option represents the period of time, in seconds, that the ovs_vsctl
1664# calls
1665# will wait for a response from the database before timing out. A setting of 0
1666# means that the utility should wait forever for a response.
1667#
1668# Possible values:
1669#
1670# * Any positive integer if a limited timeout is desired, or zero if the calls
1671# should wait forever for a response.
1672# (integer value)
1673# Minimum value: 0
1674# This option is deprecated for removal since 16.0.0.
1675# Its value may be silently ignored in the future.
1676# Reason:
1677# nova-network is deprecated, as are any related configuration options.
1678#ovs_vsctl_timeout = 120
1679
1680# DEPRECATED:
1681# This option is used mainly in testing to avoid calls to the underlying network
1682# utilities.
1683# (boolean value)
1684# This option is deprecated for removal since 16.0.0.
1685# Its value may be silently ignored in the future.
1686# Reason:
1687# nova-network is deprecated, as are any related configuration options.
1688#fake_network = false
1689
1690# DEPRECATED:
1691# This option determines the number of times to retry ebtables commands before
1692# giving up. The minimum number of retries is 1.
1693#
1694# Possible values:
1695#
1696# * Any positive integer
1697#
1698# Related options:
1699#
1700# * ``ebtables_retry_interval``
1701# (integer value)
1702# Minimum value: 1
1703# This option is deprecated for removal since 16.0.0.
1704# Its value may be silently ignored in the future.
1705# Reason:
1706# nova-network is deprecated, as are any related configuration options.
1707#ebtables_exec_attempts = 3
1708
1709# DEPRECATED:
1710# This option determines the time, in seconds, that the system will sleep in
1711# between ebtables retries. Note that each successive retry waits a multiple of
1712# this value, so for example, if this is set to the default of 1.0 seconds, and
1713# ebtables_exec_attempts is 4, after the first failure, the system will sleep
1714# for
1715# 1 * 1.0 seconds, after the second failure it will sleep 2 * 1.0 seconds, and
1716# after the third failure it will sleep 3 * 1.0 seconds.
1717#
1718# Possible values:
1719#
1720# * Any non-negative float or integer. Setting this to zero will result in no
1721# waiting between attempts.
1722#
1723# Related options:
1724#
1725# * ebtables_exec_attempts
1726# (floating point value)
1727# This option is deprecated for removal since 16.0.0.
1728# Its value may be silently ignored in the future.
1729# Reason:
1730# nova-network is deprecated, as are any related configuration options.
1731#ebtables_retry_interval = 1.0
1732
1733# DEPRECATED:
1734# Enable neutron as the backend for networking.
1735#
1736# Determine whether to use Neutron or Nova Network as the back end. Set to true
1737# to use neutron.
1738# (boolean value)
1739# This option is deprecated for removal since 15.0.0.
1740# Its value may be silently ignored in the future.
1741# Reason:
1742# nova-network is deprecated, as are any related configuration options.
1743#use_neutron = true
1744
1745#
1746# This option determines whether the network setup information is injected into
1747# the VM before it is booted. While it was originally designed to be used only
1748# by nova-network, it is also used by the vmware and xenapi virt drivers to
1749# control whether network information is injected into a VM. The libvirt virt
1750# driver also uses it when we use config_drive to configure network to control
1751# whether network information is injected into a VM.
1752# (boolean value)
1753#flat_injected = false
1754
1755# DEPRECATED:
1756# This option determines the bridge used for simple network interfaces when no
1757# bridge is specified in the VM creation request.
1758#
1759# Please note that this option is only used when using nova-network instead of
1760# Neutron in your deployment.
1761#
1762# Possible values:
1763#
1764# * Any string representing a valid network bridge, such as 'br100'
1765#
1766# Related options:
1767#
1768# * ``use_neutron``
1769# (string value)
1770# This option is deprecated for removal since 15.0.0.
1771# Its value may be silently ignored in the future.
1772# Reason:
1773# nova-network is deprecated, as are any related configuration options.
1774#flat_network_bridge = <None>
1775
1776# DEPRECATED:
1777# This is the address of the DNS server for a simple network. If this option is
1778# not specified, the default of '8.8.4.4' is used.
1779#
1780# Please note that this option is only used when using nova-network instead of
1781# Neutron in your deployment.
1782#
1783# Possible values:
1784#
1785# * Any valid IP address.
1786#
1787# Related options:
1788#
1789# * ``use_neutron``
1790# (string value)
1791# This option is deprecated for removal since 15.0.0.
1792# Its value may be silently ignored in the future.
1793# Reason:
1794# nova-network is deprecated, as are any related configuration options.
1795#flat_network_dns = 8.8.4.4
1796
1797# DEPRECATED:
1798# This option is the name of the virtual interface of the VM on which the bridge
1799# will be built. While it was originally designed to be used only by
1800# nova-network, it is also used by libvirt for the bridge interface name.
1801#
1802# Possible values:
1803#
1804# * Any valid virtual interface name, such as 'eth0'
1805# (string value)
1806# This option is deprecated for removal since 15.0.0.
1807# Its value may be silently ignored in the future.
1808# Reason:
1809# nova-network is deprecated, as are any related configuration options.
1810#flat_interface = <None>
1811
1812# DEPRECATED:
1813# This is the VLAN number used for private networks. Note that when creating
1814# the networks, if the specified number has already been assigned, nova-network
1815# will increment this number until it finds an available VLAN.
1816#
1817# Please note that this option is only used when using nova-network instead of
1818# Neutron in your deployment. It also will be ignored if the configuration
1819# option
1820# for `network_manager` is not set to the default of
1821# 'nova.network.manager.VlanManager'.
1822#
1823# Possible values:
1824#
1825# * Any integer between 1 and 4094. Values outside of that range will raise a
1826# ValueError exception.
1827#
1828# Related options:
1829#
1830# * ``network_manager``
1831# * ``use_neutron``
1832# (integer value)
1833# Minimum value: 1
1834# Maximum value: 4094
1835# This option is deprecated for removal since 15.0.0.
1836# Its value may be silently ignored in the future.
1837# Reason:
1838# nova-network is deprecated, as are any related configuration options.
1839#vlan_start = 100
1840
1841# DEPRECATED:
1842# This option is the name of the virtual interface of the VM on which the VLAN
1843# bridge will be built. While it was originally designed to be used only by
1844# nova-network, it is also used by libvirt and xenapi for the bridge interface
1845# name.
1846#
1847# Please note that this setting will be ignored in nova-network if the
1848# configuration option for `network_manager` is not set to the default of
1849# 'nova.network.manager.VlanManager'.
1850#
1851# Possible values:
1852#
1853# * Any valid virtual interface name, such as 'eth0'
1854# (string value)
1855# This option is deprecated for removal since 15.0.0.
1856# Its value may be silently ignored in the future.
1857# Reason:
1858# nova-network is deprecated, as are any related configuration options. While
1859# this option has an effect when using neutron, it incorrectly overrides the
1860# value provided by neutron and should therefore not be used.
1862#vlan_interface = <None>
1863
1864# DEPRECATED:
1865# This option represents the number of networks to create if not explicitly
1866# specified when the network is created. The only time this is used is if a CIDR
1867# is specified, but an explicit network_size is not. In that case, the subnets
1868# are created by dividing the IP address space of the CIDR by num_networks. The
1869# resulting subnet sizes cannot be larger than the configuration option
1870# `network_size`; in that event, they are reduced to `network_size`, and a
1871# warning is logged.
1872#
1873# Please note that this option is only used when using nova-network instead of
1874# Neutron in your deployment.
1875#
1876# Possible values:
1877#
1878# * Any positive integer is technically valid, although there are practical
1879# limits based upon available IP address space and virtual interfaces.
1880#
1881# Related options:
1882#
1883# * ``use_neutron``
1884# * ``network_size``
1885# (integer value)
1886# Minimum value: 1
1887# This option is deprecated for removal since 15.0.0.
1888# Its value may be silently ignored in the future.
1889# Reason:
1890# nova-network is deprecated, as are any related configuration options.
1891#num_networks = 1
1892
1893# DEPRECATED:
1894# This option is no longer used since the /os-cloudpipe API was removed in the
1895# 16.0.0 Pike release. This is the public IP address for the cloudpipe VPN
1896# servers. It defaults to the IP address of the host.
1897#
1898# Please note that this option is only used when using nova-network instead of
1899# Neutron in your deployment. It also will be ignored if the configuration
1900# option
1901# for `network_manager` is not set to the default of
1902# 'nova.network.manager.VlanManager'.
1903#
1904# Possible values:
1905#
1906# * Any valid IP address. The default is ``$my_ip``, the IP address of the VM.
1907#
1908# Related options:
1909#
1910# * ``network_manager``
1911# * ``use_neutron``
1912# * ``vpn_start``
1913# (string value)
1914# This option is deprecated for removal since 15.0.0.
1915# Its value may be silently ignored in the future.
1916# Reason:
1917# nova-network is deprecated, as are any related configuration options.
1918#vpn_ip = $my_ip
1919
1920# DEPRECATED:
1921# This is the port number to use as the first VPN port for private networks.
1922#
1923# Please note that this option is only used when using nova-network instead of
1924# Neutron in your deployment. It also will be ignored if the configuration
1925# option
1926# for `network_manager` is not set to the default of
1927# 'nova.network.manager.VlanManager', or if you specify a value for the
1928# 'vpn_start' parameter when creating a network.
1929#
1930# Possible values:
1931#
1932# * Any integer representing a valid port number. The default is 1000.
1933#
1934# Related options:
1935#
1936# * ``use_neutron``
1937# * ``vpn_ip``
1938# * ``network_manager``
1939# (port value)
1940# Minimum value: 0
1941# Maximum value: 65535
1942# This option is deprecated for removal since 15.0.0.
1943# Its value may be silently ignored in the future.
1944# Reason:
1945# nova-network is deprecated, as are any related configuration options.
1946#vpn_start = 1000
1947
1948# DEPRECATED:
1949# This option determines the number of addresses in each private subnet.
1950#
1951# Please note that this option is only used when using nova-network instead of
1952# Neutron in your deployment.
1953#
1954# Possible values:
1955#
1956# * Any positive integer that is less than or equal to the available network
1957# size. Note that if you are creating multiple networks, they must all fit in
1958# the available IP address space. The default is 256.
1959#
1960# Related options:
1961#
1962# * ``use_neutron``
1963# * ``num_networks``
1964# (integer value)
1965# Minimum value: 1
1966# This option is deprecated for removal since 15.0.0.
1967# Its value may be silently ignored in the future.
1968# Reason:
1969# nova-network is deprecated, as are any related configuration options.
1970#network_size = 256
1971
1972# DEPRECATED:
1973# This option determines the fixed IPv6 address block when creating a network.
1974#
1975# Please note that this option is only used when using nova-network instead of
1976# Neutron in your deployment.
1977#
1978# Possible values:
1979#
1980# * Any valid IPv6 CIDR
1981#
1982# Related options:
1983#
1984# * ``use_neutron``
1985# (string value)
1986# This option is deprecated for removal since 15.0.0.
1987# Its value may be silently ignored in the future.
1988# Reason:
1989# nova-network is deprecated, as are any related configuration options.
1990#fixed_range_v6 = fd00::/48
1991
1992# DEPRECATED:
1993# This is the default IPv4 gateway. It is used only in the testing suite.
1994#
1995# Please note that this option is only used when using nova-network instead of
1996# Neutron in your deployment.
1997#
1998# Possible values:
1999#
2000# * Any valid IP address.
2001#
2002# Related options:
2003#
2004# * ``use_neutron``
2005# * ``gateway_v6``
2006# (string value)
2007# This option is deprecated for removal since 15.0.0.
2008# Its value may be silently ignored in the future.
2009# Reason:
2010# nova-network is deprecated, as are any related configuration options.
2011#gateway = <None>
2012
2013# DEPRECATED:
2014# This is the default IPv6 gateway. It is used only in the testing suite.
2015#
2016# Please note that this option is only used when using nova-network instead of
2017# Neutron in your deployment.
2018#
2019# Possible values:
2020#
2021# * Any valid IP address.
2022#
2023# Related options:
2024#
2025# * ``use_neutron``
2026# * ``gateway``
2027# (string value)
2028# This option is deprecated for removal since 15.0.0.
2029# Its value may be silently ignored in the future.
2030# Reason:
2031# nova-network is deprecated, as are any related configuration options.
2032#gateway_v6 = <None>
2033
2034# DEPRECATED:
2035# This option represents the number of IP addresses to reserve at the top of the
2036# address range for VPN clients. It also will be ignored if the configuration
2037# option for `network_manager` is not set to the default of
2038# 'nova.network.manager.VlanManager'.
2039#
2040# Possible values:
2041#
2042# * Any integer, 0 or greater.
2043#
2044# Related options:
2045#
2046# * ``use_neutron``
2047# * ``network_manager``
2048# (integer value)
2049# Minimum value: 0
2050# This option is deprecated for removal since 15.0.0.
2051# Its value may be silently ignored in the future.
2052# Reason:
2053# nova-network is deprecated, as are any related configuration options.
2054#cnt_vpn_clients = 0
2055
2056# DEPRECATED:
2057# This is the number of seconds to wait before disassociating a deallocated
2058# fixed
2059# IP address. This is only used with the nova-network service, and has no effect
2060# when using neutron for networking.
2061#
2062# Possible values:
2063#
2064# * Any integer, zero or greater.
2065#
2066# Related options:
2067#
2068# * ``use_neutron``
2069# (integer value)
2070# Minimum value: 0
2071# This option is deprecated for removal since 15.0.0.
2072# Its value may be silently ignored in the future.
2073# Reason:
2074# nova-network is deprecated, as are any related configuration options.
2075#fixed_ip_disassociate_timeout = 600
2076
2077# DEPRECATED:
2078# This option determines how many times nova-network will attempt to create a
2079# unique MAC address before giving up and raising a
2080# `VirtualInterfaceMacAddressException` error.
2081#
2082# Possible values:
2083#
2084# * Any positive integer. The default is 5.
2085#
2086# Related options:
2087#
2088# * ``use_neutron``
2089# (integer value)
2090# Minimum value: 1
2091# This option is deprecated for removal since 15.0.0.
2092# Its value may be silently ignored in the future.
2093# Reason:
2094# nova-network is deprecated, as are any related configuration options.
2095#create_unique_mac_address_attempts = 5
2096
2097# DEPRECATED:
2098# Determines whether unused gateway devices, both VLAN and bridge, are deleted
2099# if
2100# the network is in nova-network VLAN mode and is multi-hosted.
2101#
2102# Related options:
2103#
2104# * ``use_neutron``
2105# * ``vpn_ip``
2106# * ``fake_network``
2107# (boolean value)
2108# This option is deprecated for removal since 15.0.0.
2109# Its value may be silently ignored in the future.
2110# Reason:
2111# nova-network is deprecated, as are any related configuration options.
2112#teardown_unused_network_gateway = false
2113
2114# DEPRECATED:
2115# When this option is True, a call is made to release the DHCP for the instance
2116# when that instance is terminated.
2117#
2118# Related options:
2119#
2120# * ``use_neutron``
2121# (boolean value)
2122# This option is deprecated for removal since 15.0.0.
2123# Its value may be silently ignored in the future.
2124# Reason:
2125# nova-network is deprecated, as are any related configuration options.
2126force_dhcp_release = {{ compute.get('force_dhcp_release', 'true') }}
2127
2128# DEPRECATED:
2129# When this option is True, whenever a DNS entry must be updated, a fanout cast
2130# message is sent to all network hosts to update their DNS entries in multi-host
2131# mode.
2132#
2133# Related options:
2134#
2135# * ``use_neutron``
2136# (boolean value)
2137# This option is deprecated for removal since 15.0.0.
2138# Its value may be silently ignored in the future.
2139# Reason:
2140# nova-network is deprecated, as are any related configuration options.
2141#update_dns_entries = false
2142
2143# DEPRECATED:
2144# This option determines the time, in seconds, to wait between refreshing DNS
2145# entries for the network.
2146#
2147# Possible values:
2148#
2149# * A positive integer
2150# * -1 to disable updates
2151#
2152# Related options:
2153#
2154# * ``use_neutron``
2155# (integer value)
2156# Minimum value: -1
2157# This option is deprecated for removal since 15.0.0.
2158# Its value may be silently ignored in the future.
2159# Reason:
2160# nova-network is deprecated, as are any related configuration options.
2161#dns_update_periodic_interval = -1
2162
2163# DEPRECATED:
2164# This option allows you to specify the domain for the DHCP server.
2165#
2166# Possible values:
2167#
2168# * Any string that is a valid domain name.
2169#
2170# Related options:
2171#
2172# * ``use_neutron``
2173# (string value)
2174# This option is deprecated for removal since 15.0.0.
2175# Its value may be silently ignored in the future.
2176# Reason:
2177# nova-network is deprecated, as are any related configuration options.
2178dhcp_domain = {{ compute.get('dhcp_domain', 'novalocal') }}
2179
2180# DEPRECATED:
2181# This option allows you to specify the L3 management library to be used.
2182#
2183# Possible values:
2184#
2185# * Any dot-separated string that represents the import path to an L3 networking
2186# library.
2187#
2188# Related options:
2189#
2190# * ``use_neutron``
2191# (string value)
2192# This option is deprecated for removal since 15.0.0.
2193# Its value may be silently ignored in the future.
2194# Reason:
2195# nova-network is deprecated, as are any related configuration options.
2196#l3_lib = nova.network.l3.LinuxNetL3
2197
2198# DEPRECATED:
2199# THIS VALUE SHOULD BE SET WHEN CREATING THE NETWORK.
2200#
2201# If True in multi_host mode, all compute hosts share the same dhcp address. The
2202# same IP address used for DHCP will be added on each nova-network node which is
2203# only visible to the VMs on the same host.
2204#
2205# The use of this configuration has been deprecated and may be removed in any
2206# release after Mitaka. It is recommended that instead of relying on this
2207# option,
2208# an explicit value should be passed to 'create_networks()' as a keyword
2209# argument
2210# with the name 'share_address'.
2211# (boolean value)
2212# This option is deprecated for removal since 2014.2.
2213# Its value may be silently ignored in the future.
2214#share_dhcp_address = false
2215
2216# DEPRECATED:
2217# URL for LDAP server which will store DNS entries
2218#
2219# Possible values:
2220#
2221# * A valid LDAP URL representing the server
2222# (uri value)
2223# This option is deprecated for removal since 16.0.0.
2224# Its value may be silently ignored in the future.
2225# Reason:
2226# nova-network is deprecated, as are any related configuration options.
2227#ldap_dns_url = ldap://ldap.example.com:389
2228
2229# DEPRECATED: Bind user for LDAP server (string value)
2230# This option is deprecated for removal since 16.0.0.
2231# Its value may be silently ignored in the future.
2232# Reason:
2233# nova-network is deprecated, as are any related configuration options.
2234#ldap_dns_user = uid=admin,ou=people,dc=example,dc=org
2235
2236# DEPRECATED: Bind user's password for LDAP server (string value)
2237# This option is deprecated for removal since 16.0.0.
2238# Its value may be silently ignored in the future.
2239# Reason:
2240# nova-network is deprecated, as are any related configuration options.
2241#ldap_dns_password = password
2242
2243# DEPRECATED:
2244# Hostmaster for LDAP DNS driver Statement of Authority
2245#
2246# Possible values:
2247#
2248# * Any valid string representing LDAP DNS hostmaster.
2249# (string value)
2250# This option is deprecated for removal since 16.0.0.
2251# Its value may be silently ignored in the future.
2252# Reason:
2253# nova-network is deprecated, as are any related configuration options.
2254#ldap_dns_soa_hostmaster = hostmaster@example.org
2255
2256# DEPRECATED:
2257# DNS Servers for LDAP DNS driver
2258#
2259# Possible values:
2260#
2261# * A valid URL representing a DNS server
2262# (multi valued)
2263# This option is deprecated for removal since 16.0.0.
2264# Its value may be silently ignored in the future.
2265# Reason:
2266# nova-network is deprecated, as are any related configuration options.
2267#ldap_dns_servers = dns.example.org
2268
2269# DEPRECATED:
2270# Base distinguished name for the LDAP search query
2271#
2272# This option helps to decide where to look up the host in LDAP.
2273# (string value)
2274# This option is deprecated for removal since 16.0.0.
2275# Its value may be silently ignored in the future.
2276# Reason:
2277# nova-network is deprecated, as are any related configuration options.
2278#ldap_dns_base_dn = ou=hosts,dc=example,dc=org
2279
2280# DEPRECATED:
2281# Refresh interval (in seconds) for LDAP DNS driver Start of Authority
2282#
2283# Time interval, a secondary/slave DNS server waits before requesting the
2284# primary DNS server's current SOA record. If the records are different,
2285# secondary DNS server will request a zone transfer from primary.
2286#
2287# NOTE: Lower values would cause more traffic.
2288# (integer value)
2289# This option is deprecated for removal since 16.0.0.
2290# Its value may be silently ignored in the future.
2291# Reason:
2292# nova-network is deprecated, as are any related configuration options.
2293#ldap_dns_soa_refresh = 1800
2294
2295# DEPRECATED:
2296# Retry interval (in seconds) for LDAP DNS driver Start of Authority
2297#
2298# Time interval, a secondary/slave DNS server should wait, if an
2299# attempt to transfer zone failed during the previous refresh interval.
2300# (integer value)
2301# This option is deprecated for removal since 16.0.0.
2302# Its value may be silently ignored in the future.
2303# Reason:
2304# nova-network is deprecated, as are any related configuration options.
2305#ldap_dns_soa_retry = 3600
2306
2307# DEPRECATED:
2308# Expiry interval (in seconds) for LDAP DNS driver Start of Authority
2309#
2310# Time interval, a secondary/slave DNS server holds the information
2311# before it is no longer considered authoritative.
2312# (integer value)
2313# This option is deprecated for removal since 16.0.0.
2314# Its value may be silently ignored in the future.
2315# Reason:
2316# nova-network is deprecated, as are any related configuration options.
2317#ldap_dns_soa_expiry = 86400
2318
2319# DEPRECATED:
2320# Minimum interval (in seconds) for LDAP DNS driver Start of Authority
2321#
2322# It is the minimum time-to-live that applies to all resource records in the
2323# zone file. This value tells other servers how long they
2324# should keep the data in cache.
2325# (integer value)
2326# This option is deprecated for removal since 16.0.0.
2327# Its value may be silently ignored in the future.
2328# Reason:
2329# nova-network is deprecated, as are any related configuration options.
2330#ldap_dns_soa_minimum = 7200
2331
2332# DEPRECATED:
2333# Default value for multi_host in networks.
2334#
2335# nova-network service can operate in a multi-host or single-host mode.
2336# In multi-host mode each compute node runs a copy of nova-network and the
2337# instances on that compute node use the compute node as a gateway to the
2338# Internet. Whereas in single-host mode, a central server runs the nova-network
2339# service. All compute nodes forward traffic from the instances to the
2340# cloud controller which then forwards traffic to the Internet.
2341#
2342# If this option is set to true, some rpc network calls will be sent directly
2343# to host.
2344#
2345# Note that this option is only used when using nova-network instead of
2346# Neutron in your deployment.
2347#
2348# Related options:
2349#
2350# * ``use_neutron``
2351# (boolean value)
2352# This option is deprecated for removal since 15.0.0.
2353# Its value may be silently ignored in the future.
2354# Reason:
2355# nova-network is deprecated, as are any related configuration options.
2356#multi_host = false
2357
2358# DEPRECATED:
2359# Driver to use for network creation.
2360#
2361# Network driver initializes (creates bridges and so on) only when the
2362# first VM lands on a host node. All network managers configure the
2363# network using network drivers. The driver is not tied to any particular
2364# network manager.
2365#
2366# The default Linux driver implements vlans, bridges, and iptables rules
2367# using linux utilities.
2368#
2369# Note that this option is only used when using nova-network instead
2370# of Neutron in your deployment.
2371#
2372# Related options:
2373#
2374# * ``use_neutron``
2375# (string value)
2376# This option is deprecated for removal since 15.0.0.
2377# Its value may be silently ignored in the future.
2378# Reason:
2379# nova-network is deprecated, as are any related configuration options.
2380#network_driver = nova.network.linux_net
2381
2382# DEPRECATED:
2383# Firewall driver to use with ``nova-network`` service.
2384#
2385# This option only applies when using the ``nova-network`` service. When using
2386# another networking service, such as Neutron, this should be set to the
2387# ``nova.virt.firewall.NoopFirewallDriver``.
2388#
2389# Possible values:
2390#
2391# * ``nova.virt.firewall.IptablesFirewallDriver``
2392# * ``nova.virt.firewall.NoopFirewallDriver``
2393# * ``nova.virt.libvirt.firewall.IptablesFirewallDriver``
2394# * [...]
2395#
2396# Related options:
2397#
2398# * ``use_neutron``: This must be set to ``False`` to enable ``nova-network``
2399# networking
2400# (string value)
2401# This option is deprecated for removal since 16.0.0.
2402# Its value may be silently ignored in the future.
2403# Reason:
2404# nova-network is deprecated, as are any related configuration options.
2405#firewall_driver = nova.virt.firewall.NoopFirewallDriver
2406
2407# DEPRECATED:
2408# Determine whether to allow network traffic from same network.
2409#
2410# When set to true, hosts on the same subnet are not filtered and are allowed
2411# to pass all types of traffic between them. On a flat network, this allows
2412# all instances from all projects unfiltered communication. With VLAN
2413# networking, this allows access between instances within the same project.
2414#
2415# This option only applies when using the ``nova-network`` service. When using
2416# another networking service, such as Neutron, security groups or other
2417# approaches should be used.
2418#
2419# Possible values:
2420#
2421# * True: Network traffic should be allowed to pass between all instances on the
2422# same network, regardless of their tenant and security policies
2423# * False: Network traffic should not be allowed to pass between instances unless
2424# it is unblocked in a security group
2425#
2426# Related options:
2427#
2428# * ``use_neutron``: This must be set to ``False`` to enable ``nova-network``
2429# networking
2430# * ``firewall_driver``: This must be set to
2431# ``nova.virt.libvirt.firewall.IptablesFirewallDriver`` to ensure the
2432# libvirt firewall driver is enabled.
2433# (boolean value)
2434# This option is deprecated for removal since 16.0.0.
2435# Its value may be silently ignored in the future.
2436# Reason:
2437# nova-network is deprecated, as are any related configuration options.
2438#allow_same_net_traffic = true
2439
2440# DEPRECATED:
2441# Default pool for floating IPs.
2442#
2443# This option specifies the default floating IP pool for allocating floating
2444# IPs.
2445#
2446# While allocating a floating IP, users can optionally pass in the name of the
2447# pool they want to allocate from, otherwise it will be pulled from the
2448# default pool.
2449#
2450# If this option is not set, then 'nova' is used as default floating pool.
2451#
2452# Possible values:
2453#
2454# * Any string representing a floating IP pool name
2455# (string value)
2456# This option is deprecated for removal since 16.0.0.
2457# Its value may be silently ignored in the future.
2458# Reason:
2459# This option was used for two purposes: to set the floating IP pool name for
2460# nova-network and to do the same for neutron. nova-network is deprecated, as
2461# are
2462# any related configuration options. Users of neutron, meanwhile, should use the
2463# 'default_floating_pool' option in the '[neutron]' group.
2464#default_floating_pool = nova
2465
2466# DEPRECATED:
2467# Autoassigning floating IP to VM
2468#
2469# When set to True, floating IP is auto allocated and associated
2470# to the VM upon creation.
2471#
2472# Related options:
2473#
2474# * use_neutron: this option only works with nova-network.
2475# (boolean value)
2476# This option is deprecated for removal since 15.0.0.
2477# Its value may be silently ignored in the future.
2478# Reason:
2479# nova-network is deprecated, as are any related configuration options.
2480#auto_assign_floating_ip = false
2481
2482# DEPRECATED:
2483# Full class name for the DNS Manager for floating IPs.
2484#
2485# This option specifies the class of the driver that provides functionality
2486# to manage DNS entries associated with floating IPs.
2487#
2488# When a user adds a DNS entry for a specified domain to a floating IP,
2489# nova will add a DNS entry using the specified floating DNS driver.
2490# When a floating IP is deallocated, its DNS entry will automatically be
2491# deleted.
2492#
2493# Possible values:
2494#
2495# * Full Python path to the class to be used
2496#
2497# Related options:
2498#
2499# * use_neutron: this option only works with nova-network.
2500# (string value)
2501# This option is deprecated for removal since 15.0.0.
2502# Its value may be silently ignored in the future.
2503# Reason:
2504# nova-network is deprecated, as are any related configuration options.
2505#floating_ip_dns_manager = nova.network.noop_dns_driver.NoopDNSDriver
2506
2507# DEPRECATED:
2508# Full class name for the DNS Manager for instance IPs.
2509#
2510# This option specifies the class of the driver that provides functionality
2511# to manage DNS entries for instances.
2512#
2513# On instance creation, nova will add DNS entries for the instance name and
2514# id, using the specified instance DNS driver and domain. On instance deletion,
2515# nova will remove the DNS entries.
2516#
2517# Possible values:
2518#
2519# * Full Python path to the class to be used
2520#
2521# Related options:
2522#
2523# * use_neutron: this option only works with nova-network.
2524# (string value)
2525# This option is deprecated for removal since 15.0.0.
2526# Its value may be silently ignored in the future.
2527# Reason:
2528# nova-network is deprecated, as are any related configuration options.
2529#instance_dns_manager = nova.network.noop_dns_driver.NoopDNSDriver
2530
2531# DEPRECATED:
2532# If specified, Nova checks if the availability_zone of every instance matches
2533# what the database says the availability_zone should be for the specified
2534# dns_domain.
2535#
2536# Related options:
2537#
2538# * use_neutron: this option only works with nova-network.
2539# (string value)
2540# This option is deprecated for removal since 15.0.0.
2541# Its value may be silently ignored in the future.
2542# Reason:
2543# nova-network is deprecated, as are any related configuration options.
2544#instance_dns_domain =
2545
2546# DEPRECATED:
2547# Assign IPv6 and IPv4 addresses when creating instances.
2548#
2549# Related options:
2550#
2551# * use_neutron: this only works with nova-network.
2552# (boolean value)
2553# This option is deprecated for removal since 16.0.0.
2554# Its value may be silently ignored in the future.
2555# Reason:
2556# nova-network is deprecated, as are any related configuration options.
2557#use_ipv6 = false
2558
2559# DEPRECATED:
2560# Abstracts out IPv6 address generation to pluggable backends.
2561#
2562# nova-network can be put into dual-stack mode, so that it uses
2563# both IPv4 and IPv6 addresses. In dual-stack mode, by default, instances
2564# acquire IPv6 global unicast addresses with the help of stateless address
2565# auto-configuration mechanism.
2566#
2567# Related options:
2568#
2569# * use_neutron: this option only works with nova-network.
2570# * use_ipv6: this option only works if ipv6 is enabled for nova-network.
2571# (string value)
2572# Possible values:
2573# rfc2462 - <No description provided>
2574# account_identifier - <No description provided>
2575# This option is deprecated for removal since 16.0.0.
2576# Its value may be silently ignored in the future.
2577# Reason:
2578# nova-network is deprecated, as are any related configuration options.
2579#ipv6_backend = rfc2462
2580
2581# DEPRECATED:
2582# This option is used to enable or disable quota checking for tenant networks.
2583#
2584# Related options:
2585#
2586# * quota_networks
2587# (boolean value)
2588# This option is deprecated for removal since 14.0.0.
2589# Its value may be silently ignored in the future.
2590# Reason:
2591# CRUD operations on tenant networks are only available when using nova-network
2592# and nova-network is itself deprecated.
2593#enable_network_quota = false
2594
2595# DEPRECATED:
2596# This option controls the number of private networks that can be created per
2597# project (or per tenant).
2598#
2599# Related options:
2600#
2601# * enable_network_quota
2602# (integer value)
2603# Minimum value: 0
2604# This option is deprecated for removal since 14.0.0.
2605# Its value may be silently ignored in the future.
2606# Reason:
2607# CRUD operations on tenant networks are only available when using nova-network
2608# and nova-network is itself deprecated.
2609#quota_networks = 3
2610
2611# DEPRECATED: Full class name for the Manager for network (string value)
2612# Possible values:
2613# nova.network.manager.FlatManager - <No description provided>
2614# nova.network.manager.FlatDHCPManager - <No description provided>
2615# nova.network.manager.VlanManager - <No description provided>
2616# This option is deprecated for removal since 18.0.0.
2617# Its value may be silently ignored in the future.
2618# Reason:
2619# nova-network is deprecated, as are any related configuration options.
2620#network_manager = nova.network.manager.VlanManager
2621
2622#
2623# Filename that will be used for storing websocket frames received
2624# and sent by a proxy service (like VNC, spice, serial) running on this host.
2625# If this is not set, no recording will be done.
2626# (string value)
2627#record = <None>
2628
2629# Run as a background process. (boolean value)
2630#daemon = false
2631
2632# Disallow non-encrypted connections. (boolean value)
2633#ssl_only = false
2634
2635# Set to True if source host is addressed with IPv6. (boolean value)
2636#source_is_ipv6 = false
2637
2638# Path to SSL certificate file. (string value)
2639#cert = self.pem
2640
2641# SSL key file (if separate from cert). (string value)
2642#key = <None>
2643
2644#
2645# Path to directory with content which will be served by a web server.
2646# (string value)
2647#web = /usr/share/spice-html5
2648
2649#
2650# The directory where the Nova python modules are installed.
2651#
2652# This directory is used to store template files for networking and remote
2653# console access. It is also the default path for other config options which
2654# need to persist Nova internal data. It is very unlikely that you need to
2655# change this option from its default value.
2656#
2657# Possible values:
2658#
2659# * The full path to a directory.
2660#
2661# Related options:
2662#
2663# * ``state_path``
2664# (string value)
2665#pybasedir = <Path>
2666
2667#
2668# The directory where the Nova binaries are installed.
2669#
2670# This option is only relevant if the networking capabilities from Nova are
2671# used (see services below). Nova's networking capabilities are targeted to
2672# be fully replaced by Neutron in the future. It is very unlikely that you need
2673# to change this option from its default value.
2674#
2675# Possible values:
2676#
2677# * The full path to a directory.
2678# (string value)
2679#bindir = /tmp/nova/.tox/shared/local/bin
2680
2681#
2682# The top-level directory for maintaining Nova's state.
2683#
2684# This directory is used to store Nova's internal state. It is used by a
2685# variety of other config options which derive from this. In some scenarios
2686# (for example migrations) it makes sense to use a storage location which is
2687# shared between multiple compute hosts (for example via NFS). Unless the
2688# option ``instances_path`` gets overwritten, this directory can grow very
2689# large.
2690#
2691# Possible values:
2692#
2693# * The full path to a directory. Defaults to value provided in ``pybasedir``.
2694# (string value)
2695state_path = /var/lib/nova
2696
2697#
2698# This option allows setting an alternate timeout value for RPC calls
2699# that have the potential to take a long time. If set, RPC calls to
2700# other services will use this value for the timeout (in seconds)
2701# instead of the global rpc_response_timeout value.
2702#
2703# Operations with RPC calls that utilize this value:
2704#
2705# * live migration
2706#
2707# Related options:
2708#
2709# * rpc_response_timeout
2710# (integer value)
2711#long_rpc_timeout = 1800
2712
2713#
2714# Number of seconds indicating how frequently the state of services on a
2715# given hypervisor is reported. Nova needs to know this to determine the
2716# overall health of the deployment.
2717#
2718# Related Options:
2719#
2720# * service_down_time
2721# report_interval should be less than service_down_time. If service_down_time
2722# is less than report_interval, services will routinely be considered down,
2723# because they report in too rarely.
2724# (integer value)
2725report_interval = {{ compute.get('report_interval', '60') }}
2726
2727#
2728# Maximum time in seconds since last check-in for up service
2729#
2730# Each compute node periodically updates their database status based on the
2731# specified report interval. If the compute node hasn't updated the status
2732# for more than service_down_time, then the compute node is considered down.
2733#
2734# Related Options:
2735#
2736# * report_interval (service_down_time should not be less than report_interval)
2737# * scheduler.periodic_task_interval
2738# (integer value)
2739service_down_time = 90
2740
2741#
2742# Enable periodic tasks.
2743#
2744# If set to true, this option allows services to periodically run tasks
2745# on the manager.
2746#
2747# In case of running multiple schedulers or conductors you may want to run
2748# periodic tasks on only one host - in this case disable this option for all
2749# hosts but one.
2750# (boolean value)
2751#periodic_enable = true
2752
2753#
2754# Number of seconds to randomly delay when starting the periodic task
2755# scheduler to reduce stampeding.
2756#
2757# When compute workers are restarted in unison across a cluster,
2758# they all end up running the periodic tasks at the same time
2759# causing problems for the external services. To mitigate this
2760# behavior, periodic_fuzzy_delay option allows you to introduce a
2761# random initial delay when starting the periodic task scheduler.
2762#
2763# Possible Values:
2764#
2765# * Any positive integer (in seconds)
2766# * 0 : disable the random delay
2767# (integer value)
2768# Minimum value: 0
2769#periodic_fuzzy_delay = 60
2770
2771# List of APIs to be enabled by default. (list value)
2772#enabled_apis = osapi_compute,metadata
2773
2774#
2775# List of APIs with enabled SSL.
2776#
2777# Nova provides SSL support for the API servers. enabled_ssl_apis option
2778# allows configuring the SSL support.
2779# (list value)
2780#enabled_ssl_apis =
2781
2782#
2783# IP address on which the OpenStack API will listen.
2784#
2785# The OpenStack API service listens on this IP address for incoming
2786# requests.
2787# (string value)
2788#osapi_compute_listen = 0.0.0.0
2789
2790#
2791# Port on which the OpenStack API will listen.
2792#
2793# The OpenStack API service listens on this port number for incoming
2794# requests.
2795# (port value)
2796# Minimum value: 0
2797# Maximum value: 65535
2798#osapi_compute_listen_port = 8774
2799
2800#
2801# Number of workers for OpenStack API service. The default will be the number
2802# of CPUs available.
2803#
2804# OpenStack API services can be configured to run as multi-process (workers).
2805# This overcomes the problem of reduction in throughput when API request
2806# concurrency increases. OpenStack API service will run in the specified
2807# number of processes.
2808#
2809# Possible Values:
2810#
2811# * Any positive integer
2812# * None (default value)
2813# (integer value)
2814# Minimum value: 1
2815#osapi_compute_workers = <None>
2816
2817#
2818# IP address on which the metadata API will listen.
2819#
2820# The metadata API service listens on this IP address for incoming
2821# requests.
2822# (string value)
2823#metadata_listen = 0.0.0.0
2824
2825#
2826# Port on which the metadata API will listen.
2827#
2828# The metadata API service listens on this port number for incoming
2829# requests.
2830# (port value)
2831# Minimum value: 0
2832# Maximum value: 65535
2833#metadata_listen_port = 8775
2834
2835#
2836# Number of workers for metadata service. If not specified the number of
2837# available CPUs will be used.
2838#
2839# The metadata service can be configured to run as multi-process (workers).
2840# This overcomes the problem of reduction in throughput when API request
2841# concurrency increases. The metadata service will run in the specified
2842# number of processes.
2843#
2844# Possible Values:
2845#
2846# * Any positive integer
2847# * None (default value)
2848# (integer value)
2849# Minimum value: 1
2850#metadata_workers = <None>
2851
2852#
2853# This option specifies the driver to be used for the servicegroup service.
2854#
2855# ServiceGroup API in nova enables checking status of a compute node. When a
2856# compute worker running the nova-compute daemon starts, it calls the join API
2857# to join the compute group. Services like nova scheduler can query the
2858# ServiceGroup API to check if a node is alive. Internally, the ServiceGroup
2859# client driver automatically updates the compute worker status. There are
2860# multiple backend implementations for this service: Database ServiceGroup
2861# driver
2862# and Memcache ServiceGroup driver.
2863#
2864# Possible Values:
2865#
2866# * db : Database ServiceGroup driver
2867# * mc : Memcache ServiceGroup driver
2868#
2869# Related Options:
2870#
2871# * service_down_time (maximum time since last check-in for up service)
2872# (string value)
2873# Possible values:
2874# db - <No description provided>
2875# mc - <No description provided>
2876#servicegroup_driver = db
2877
2878{%- if compute.logging is defined %}
2879{%- set _data = compute.logging %}
2880{%- include "oslo_templates/files/" ~ compute.version ~ "/oslo/_log.conf" %}
2881{%- endif %}
2882
2883{%- set _data = compute.message_queue %}
2884{%- include "oslo_templates/files/" ~ compute.version ~ "/oslo/messaging/_default.conf" %}
2885
2886#
2887# From oslo.service.periodic_task
2888#
2889
2890# Some periodic tasks can be run in a separate process. Should we run them here?
2891# (boolean value)
2892#run_external_periodic_tasks = true
2893
2894#
2895# From oslo.service.service
2896#
2897
2898# Enable eventlet backdoor. Acceptable values are 0, <port>, and <start>:<end>,
2899# where 0 results in listening on a random tcp port number; <port> results in
2900# listening on the specified port number (and not enabling backdoor if that port
2901# is in use); and <start>:<end> results in listening on the smallest unused port
2902# number within the specified range of port numbers. The chosen port is
2903# displayed in the service's log file. (string value)
2904#backdoor_port = <None>
2905
2906# Enable eventlet backdoor, using the provided path as a unix socket that can
2907# receive connections. This option is mutually exclusive with 'backdoor_port' in
2908# that only one should be provided. If both are provided then the existence of
2909# this option overrides the usage of that option. (string value)
2910#backdoor_socket = <None>
2911
2912# Enables or disables logging values of all registered options when starting a
2913# service (at DEBUG level). (boolean value)
2914#log_options = true
2915
2916# Specify a timeout after which a gracefully shutdown server will exit. Zero
2917# value means endless wait. (integer value)
2918#graceful_shutdown_timeout = 60
2919
2920
2921[api]
2922#
2923# Options under this group are used to define Nova API.
2924
2925#
2926# From nova.conf
2927#
2928
2929#
2930# This determines the strategy to use for authentication: keystone or noauth2.
2931# 'noauth2' is designed for testing only, as it does no actual credential
2932# checking. 'noauth2' provides administrative credentials only if 'admin' is
2933# specified as the username.
2934# (string value)
2935# Possible values:
2936# keystone - <No description provided>
2937# noauth2 - <No description provided>
2938#auth_strategy = keystone
2939
2940#
2941# When True, the 'X-Forwarded-For' header is treated as the canonical remote
2942# address. When False (the default), the 'remote_address' header is used.
2943#
# You should only enable this if you have a sanitizing proxy that sets the
# 'X-Forwarded-For' header.
2945# (boolean value)
2946#use_forwarded_for = false
2947
2948#
2949# When gathering the existing metadata for a config drive, the EC2-style
2950# metadata is returned for all versions that don't appear in this option.
2951# As of the Liberty release, the available versions are:
2952#
2953# * 1.0
2954# * 2007-01-19
2955# * 2007-03-01
2956# * 2007-08-29
2957# * 2007-10-10
2958# * 2007-12-15
2959# * 2008-02-01
2960# * 2008-09-01
2961# * 2009-04-04
2962#
2963# The option is in the format of a single string, with each version separated
2964# by a space.
2965#
2966# Possible values:
2967#
2968# * Any string that represents zero or more versions, separated by spaces.
2969# (string value)
2970#config_drive_skip_versions = 1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01
2971
2972#
2973# A list of vendordata providers.
2974#
2975# vendordata providers are how deployers can provide metadata via configdrive
2976# and metadata that is specific to their deployment. There are currently two
2977# supported providers: StaticJSON and DynamicJSON.
2978#
2979# StaticJSON reads a JSON file configured by the flag vendordata_jsonfile_path
2980# and places the JSON from that file into vendor_data.json and
2981# vendor_data2.json.
2982#
2983# DynamicJSON is configured via the vendordata_dynamic_targets flag, which is
2984# documented separately. For each of the endpoints specified in that flag, a
2985# section is added to the vendor_data2.json.
2986#
2987# For more information on the requirements for implementing a vendordata
2988# dynamic endpoint, please see the vendordata.rst file in the nova developer
2989# reference.
2990#
2991# Possible values:
2992#
2993# * A list of vendordata providers, with StaticJSON and DynamicJSON being
2994# current options.
2995#
2996# Related options:
2997#
2998# * vendordata_dynamic_targets
2999# * vendordata_dynamic_ssl_certfile
3000# * vendordata_dynamic_connect_timeout
3001# * vendordata_dynamic_read_timeout
3002# * vendordata_dynamic_failure_fatal
3003# (list value)
3004#vendordata_providers = StaticJSON
3005
3006#
3007# A list of targets for the dynamic vendordata provider. These targets are of
3008# the form <name>@<url>.
3009#
3010# The dynamic vendordata provider collects metadata by contacting external REST
3011# services and querying them for information about the instance. This behaviour
3012# is documented in the vendordata.rst file in the nova developer reference.
3013# (list value)
3014#vendordata_dynamic_targets =
3015
3016#
3017# Path to an optional certificate file or CA bundle to verify dynamic
3018# vendordata REST services ssl certificates against.
3019#
3020# Possible values:
3021#
3022# * An empty string, or a path to a valid certificate file
3023#
3024# Related options:
3025#
3026# * vendordata_providers
3027# * vendordata_dynamic_targets
3028# * vendordata_dynamic_connect_timeout
3029# * vendordata_dynamic_read_timeout
3030# * vendordata_dynamic_failure_fatal
3031# (string value)
3032#vendordata_dynamic_ssl_certfile =
3033
3034#
3035# Maximum wait time for an external REST service to connect.
3036#
3037# Possible values:
3038#
# * Any integer with a value of at least three (the TCP packet retransmission
3040# timeout). Note that instance start may be blocked during this wait time,
3041# so this value should be kept small.
3042#
3043# Related options:
3044#
3045# * vendordata_providers
3046# * vendordata_dynamic_targets
3047# * vendordata_dynamic_ssl_certfile
3048# * vendordata_dynamic_read_timeout
3049# * vendordata_dynamic_failure_fatal
3050# (integer value)
3051# Minimum value: 3
3052#vendordata_dynamic_connect_timeout = 5
3053
3054#
3055# Maximum wait time for an external REST service to return data once connected.
3056#
3057# Possible values:
3058#
3059# * Any integer. Note that instance start is blocked during this wait time,
3060# so this value should be kept small.
3061#
3062# Related options:
3063#
3064# * vendordata_providers
3065# * vendordata_dynamic_targets
3066# * vendordata_dynamic_ssl_certfile
3067# * vendordata_dynamic_connect_timeout
3068# * vendordata_dynamic_failure_fatal
3069# (integer value)
3070# Minimum value: 0
3071#vendordata_dynamic_read_timeout = 5
3072
3073#
3074# Should failures to fetch dynamic vendordata be fatal to instance boot?
3075#
3076# Related options:
3077#
3078# * vendordata_providers
3079# * vendordata_dynamic_targets
3080# * vendordata_dynamic_ssl_certfile
3081# * vendordata_dynamic_connect_timeout
3082# * vendordata_dynamic_read_timeout
3083# (boolean value)
3084#vendordata_dynamic_failure_fatal = false
3085
3086#
3087# This option is the time (in seconds) to cache metadata. When set to 0,
3088# metadata caching is disabled entirely; this is generally not recommended for
3089# performance reasons. Increasing this setting should improve response times
3090# of the metadata API when under heavy load. Higher values may increase memory
3091# usage, and result in longer times for host metadata changes to take effect.
3092# (integer value)
3093# Minimum value: 0
3094#metadata_cache_expiration = 15
3095
3096#
3097# Cloud providers may store custom data in vendor data file that will then be
3098# available to the instances via the metadata service, and to the rendering of
3099# config-drive. The default class for this, JsonFileVendorData, loads this
3100# information from a JSON file, whose path is configured by this option. If
3101# there is no path set by this option, the class returns an empty dictionary.
3102#
3103# Possible values:
3104#
3105# * Any string representing the path to the data file, or an empty string
3106# (default).
3107# (string value)
3108#vendordata_jsonfile_path = <None>
3109
3110#
3111# As a query can potentially return many thousands of items, you can limit the
3112# maximum number of items in a single response by setting this option.
3113# (integer value)
3114# Minimum value: 0
3115# Deprecated group/name - [DEFAULT]/osapi_max_limit
3116#max_limit = 1000
3117
3118#
3119# This string is prepended to the normal URL that is returned in links to the
3120# OpenStack Compute API. If it is empty (the default), the URLs are returned
3121# unchanged.
3122#
3123# Possible values:
3124#
3125# * Any string, including an empty string (the default).
3126# (string value)
3127# Deprecated group/name - [DEFAULT]/osapi_compute_link_prefix
3128#compute_link_prefix = <None>
3129
3130#
3131# This string is prepended to the normal URL that is returned in links to
3132# Glance resources. If it is empty (the default), the URLs are returned
3133# unchanged.
3134#
3135# Possible values:
3136#
3137# * Any string, including an empty string (the default).
3138# (string value)
3139# Deprecated group/name - [DEFAULT]/osapi_glance_link_prefix
3140#glance_link_prefix = <None>
3141
3142#
3143# When enabled, this will cause the API to only query cell databases
3144# in which the tenant has mapped instances. This requires an additional
3145# (fast) query in the API database before each list, but also
3146# (potentially) limits the number of cell databases that must be queried
3147# to provide the result. If you have a small number of cells, or tenants
3148# are likely to have instances in all cells, then this should be
3149# False. If you have many cells, especially if you confine tenants to a
3150# small subset of those cells, this should be True.
3151# (boolean value)
3152#instance_list_per_project_cells = false
3153
3154# DEPRECATED:
3155# This option is a list of all instance states for which network address
3156# information should not be returned from the API.
3157#
3158# Possible values:
3159#
3160# A list of strings, where each string is a valid VM state, as defined in
3161# nova/compute/vm_states.py. As of the Newton release, they are:
3162#
3163# * "active"
3164# * "building"
3165# * "paused"
3166# * "suspended"
3167# * "stopped"
3168# * "rescued"
3169# * "resized"
3170# * "soft-delete"
3171# * "deleted"
3172# * "error"
3173# * "shelved"
3174# * "shelved_offloaded"
3175# (list value)
3176# Deprecated group/name - [DEFAULT]/osapi_hide_server_address_states
3177# This option is deprecated for removal since 17.0.0.
3178# Its value may be silently ignored in the future.
# Reason: This option hides the server address in the server representation for
# the configured server states, which makes the behavior of the GET server API
# depend on this config option. Because of this, users would not be able to
# discover the API behavior on different clouds, which leads to
# interoperability issues.
3183#hide_server_address_states = building
3184
3185#
3186# When True, the TenantNetworkController will query the Neutron API to get the
3187# default networks to use.
3188#
3189# Related options:
3190#
3191# * neutron_default_tenant_id
3192# (boolean value)
3193#use_neutron_default_nets = false
3194
3195#
3196# Tenant ID for getting the default network from Neutron API (also referred in
3197# some places as the 'project ID') to use.
3198#
3199# Related options:
3200#
3201# * use_neutron_default_nets
3202# (string value)
3203#neutron_default_tenant_id = default
3204
3205#
3206# Enables returning of the instance password by the relevant server API calls
3207# such as create, rebuild, evacuate, or rescue. If the hypervisor does not
3208# support password injection, then the password returned will not be correct,
3209# so if your hypervisor does not support password injection, set this to False.
3210# (boolean value)
3211#enable_instance_password = true
3212
3213
3214[api_database]
3215{%- set _data = {} %}
3216{%- do _data.update(compute.database) %}
3217{%- do _data.update({'name': 'nova_api'}) %}
3218{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': compute.cacert_file}) %}{% endif %}
3219{%- include "oslo_templates/files/" ~ compute.version ~ "/oslo/_database.conf" %}
3220
3221
3222[barbican]
3223{%- if compute.get('barbican', {}).get('enabled', False) %}
3224{%- set _data = compute.identity %}
3225{%- include "oslo_templates/files/" ~ compute.version ~ "/castellan/_barbican.conf" %}
3226{%- endif %}
3227
3228
3229[cache]
Michael Polenchukf37e5b62018-11-28 17:55:45 +04003230{%- if compute.cache is defined %}
Oleksandr Bryndzii068f1532019-02-18 15:18:42 +02003231{%- set _data = compute.cache %}
3232{%- include "oslo_templates/files/" ~ compute.version ~ "/oslo/_cache.conf" %}
Michael Polenchukf37e5b62018-11-28 17:55:45 +04003233{%- endif %}
3234
Michael Polenchukf37e5b62018-11-28 17:55:45 +04003235
3236[cells]
3237#
3238# DEPRECATED: Cells options allow you to use cells v1 functionality in an
3239# OpenStack deployment.
3240#
3241# Note that the options in this group are only for cells v1 functionality, which
3242# is considered experimental and not recommended for new deployments. Cells v1
3243# is being replaced with cells v2, which starting in the 15.0.0 Ocata release is
3244# required and all Nova deployments will be at least a cells v2 cell of one.
3245#
3246
3247#
3248# From nova.conf
3249#
3250
3251# DEPRECATED:
3252# Enable cell v1 functionality.
3253#
3254# Note that cells v1 is considered experimental and not recommended for new
3255# Nova deployments. Cells v1 is being replaced by cells v2 which starting in
3256# the 15.0.0 Ocata release, all Nova deployments are at least a cells v2 cell
3257# of one. Setting this option, or any other options in the [cells] group, is
3258# not required for cells v2.
3259#
# When this functionality is enabled, it lets you scale an OpenStack
3261# Compute cloud in a more distributed fashion without having to use
3262# complicated technologies like database and message queue clustering.
3263# Cells are configured as a tree. The top-level cell should have a host
3264# that runs a nova-api service, but no nova-compute services. Each
3265# child cell should run all of the typical nova-* services in a regular
3266# Compute cloud except for nova-api. You can think of cells as a normal
3267# Compute deployment in that each cell has its own database server and
3268# message queue broker.
3269#
3270# Related options:
3271#
3272# * name: A unique cell name must be given when this functionality
3273# is enabled.
3274# * cell_type: Cell type should be defined for all cells.
3275# (boolean value)
3276# This option is deprecated for removal since 16.0.0.
3277# Its value may be silently ignored in the future.
3278# Reason: Cells v1 is being replaced with Cells v2.
3279#enable = false
3280
3281# DEPRECATED:
3282# Name of the current cell.
3283#
3284# This value must be unique for each cell. Name of a cell is used as
3285# its id, leaving this option unset or setting the same name for
3286# two or more cells may cause unexpected behaviour.
3287#
3288# Related options:
3289#
3290# * enabled: This option is meaningful only when cells service
3291# is enabled
3292# (string value)
3293# This option is deprecated for removal since 16.0.0.
3294# Its value may be silently ignored in the future.
3295# Reason: Cells v1 is being replaced with Cells v2.
3296#name = nova
3297
3298# DEPRECATED:
3299# Cell capabilities.
3300#
3301# List of arbitrary key=value pairs defining capabilities of the
3302# current cell to be sent to the parent cells. These capabilities
3303# are intended to be used in cells scheduler filters/weighers.
3304#
3305# Possible values:
3306#
3307# * key=value pairs list for example;
3308# ``hypervisor=xenserver;kvm,os=linux;windows``
3309# (list value)
3310# This option is deprecated for removal since 16.0.0.
3311# Its value may be silently ignored in the future.
3312# Reason: Cells v1 is being replaced with Cells v2.
3313#capabilities = hypervisor=xenserver;kvm,os=linux;windows
3314
3315# DEPRECATED:
3316# Call timeout.
3317#
3318# Cell messaging module waits for response(s) to be put into the
3319# eventlet queue. This option defines the seconds waited for
3320# response from a call to a cell.
3321#
3322# Possible values:
3323#
3324# * An integer, corresponding to the interval time in seconds.
3325# (integer value)
3326# Minimum value: 0
3327# This option is deprecated for removal since 16.0.0.
3328# Its value may be silently ignored in the future.
3329# Reason: Cells v1 is being replaced with Cells v2.
3330#call_timeout = 60
3331
3332# DEPRECATED:
3333# Reserve percentage
3334#
3335# Percentage of cell capacity to hold in reserve, so the minimum
3336# amount of free resource is considered to be;
3337#
3338# min_free = total * (reserve_percent / 100.0)
3339#
3340# This option affects both memory and disk utilization.
3341#
3342# The primary purpose of this reserve is to ensure some space is
3343# available for users who want to resize their instance to be larger.
3344# Note that currently once the capacity expands into this reserve
3345# space this option is ignored.
3346#
3347# Possible values:
3348#
3349# * An integer or float, corresponding to the percentage of cell capacity to
3350# be held in reserve.
3351# (floating point value)
3352# This option is deprecated for removal since 16.0.0.
3353# Its value may be silently ignored in the future.
3354# Reason: Cells v1 is being replaced with Cells v2.
3355#reserve_percent = 10.0
3356
3357# DEPRECATED:
3358# Type of cell.
3359#
3360# When cells feature is enabled the hosts in the OpenStack Compute
3361# cloud are partitioned into groups. Cells are configured as a tree.
3362# The top-level cell's cell_type must be set to ``api``. All other
3363# cells are defined as a ``compute cell`` by default.
3364#
3365# Related option:
3366#
3367# * quota_driver: Disable quota checking for the child cells.
3368# (nova.quota.NoopQuotaDriver)
3369# (string value)
3370# Possible values:
3371# api - <No description provided>
3372# compute - <No description provided>
3373# This option is deprecated for removal since 16.0.0.
3374# Its value may be silently ignored in the future.
3375# Reason: Cells v1 is being replaced with Cells v2.
3376#cell_type = compute
3377
3378# DEPRECATED:
3379# Mute child interval.
3380#
# Number of seconds without a capability and capacity update after
# which the child cell is to be treated as a mute cell. The mute
# child cell will then be weighted so that skipping it is highly
# recommended.
3384#
3385# Possible values:
3386#
3387# * An integer, corresponding to the interval time in seconds.
3388# (integer value)
3389# This option is deprecated for removal since 16.0.0.
3390# Its value may be silently ignored in the future.
3391# Reason: Cells v1 is being replaced with Cells v2.
3392#mute_child_interval = 300
3393
3394# DEPRECATED:
3395# Bandwidth update interval.
3396#
3397# Seconds between bandwidth usage cache updates for cells.
3398#
3399# Possible values:
3400#
3401# * An integer, corresponding to the interval time in seconds.
3402# (integer value)
3403# This option is deprecated for removal since 16.0.0.
3404# Its value may be silently ignored in the future.
3405# Reason: Cells v1 is being replaced with Cells v2.
3406#bandwidth_update_interval = 600
3407
3408# DEPRECATED:
3409# Instance update sync database limit.
3410#
3411# Number of instances to pull from the database at one time for
3412# a sync. If there are more instances to update the results will
3413# be paged through.
3414#
3415# Possible values:
3416#
3417# * An integer, corresponding to a number of instances.
3418# (integer value)
3419# This option is deprecated for removal since 16.0.0.
3420# Its value may be silently ignored in the future.
3421# Reason: Cells v1 is being replaced with Cells v2.
3422#instance_update_sync_database_limit = 100
3423
3424# DEPRECATED:
3425# Mute weight multiplier.
3426#
# Multiplier used to weigh mute children. Mute child cells are
3428# recommended to be skipped so their weight is multiplied by this
3429# negative value.
3430#
3431# Possible values:
3432#
3433# * Negative numeric number
3434# (floating point value)
3435# This option is deprecated for removal since 16.0.0.
3436# Its value may be silently ignored in the future.
3437# Reason: Cells v1 is being replaced with Cells v2.
3438#mute_weight_multiplier = -10000.0
3439
3440# DEPRECATED:
3441# Ram weight multiplier.
3442#
3443# Multiplier used for weighing ram. Negative numbers indicate that
3444# Compute should stack VMs on one host instead of spreading out new
3445# VMs to more hosts in the cell.
3446#
3447# Possible values:
3448#
3449# * Numeric multiplier
3450# (floating point value)
3451# This option is deprecated for removal since 16.0.0.
3452# Its value may be silently ignored in the future.
3453# Reason: Cells v1 is being replaced with Cells v2.
3454#ram_weight_multiplier = 10.0
3455
3456# DEPRECATED:
3457# Offset weight multiplier
3458#
3459# Multiplier used to weigh offset weigher. Cells with higher
3460# weight_offsets in the DB will be preferred. The weight_offset
3461# is a property of a cell stored in the database. It can be used
3462# by a deployer to have scheduling decisions favor or disfavor
3463# cells based on the setting.
3464#
3465# Possible values:
3466#
3467# * Numeric multiplier
3468# (floating point value)
3469# This option is deprecated for removal since 16.0.0.
3470# Its value may be silently ignored in the future.
3471# Reason: Cells v1 is being replaced with Cells v2.
3472#offset_weight_multiplier = 1.0
3473
3474# DEPRECATED:
3475# Instance updated at threshold
3476#
3477# Number of seconds after an instance was updated or deleted to
# continue to update cells. This option lets the cells manager only
# attempt to sync instances that have been updated recently.
# i.e., a threshold of 3600 means to only update instances that
# have been modified in the last hour.
3482#
3483# Possible values:
3484#
3485# * Threshold in seconds
3486#
3487# Related options:
3488#
3489# * This value is used with the ``instance_update_num_instances``
3490# value in a periodic task run.
3491# (integer value)
3492# This option is deprecated for removal since 16.0.0.
3493# Its value may be silently ignored in the future.
3494# Reason: Cells v1 is being replaced with Cells v2.
3495#instance_updated_at_threshold = 3600
3496
3497# DEPRECATED:
3498# Instance update num instances
3499#
# On every run of the periodic task, the nova cells manager will attempt
# to sync up to this number of instances. When the
3502# manager gets the list of instances, it shuffles them so that multiple
3503# nova-cells services do not attempt to sync the same instances in
3504# lockstep.
3505#
3506# Possible values:
3507#
3508# * Positive integer number
3509#
3510# Related options:
3511#
3512# * This value is used with the ``instance_updated_at_threshold``
3513# value in a periodic task run.
3514# (integer value)
3515# This option is deprecated for removal since 16.0.0.
3516# Its value may be silently ignored in the future.
3517# Reason: Cells v1 is being replaced with Cells v2.
3518#instance_update_num_instances = 1
3519
3520# DEPRECATED:
3521# Maximum hop count
3522#
3523# When processing a targeted message, if the local cell is not the
3524# target, a route is defined between neighbouring cells. And the
3525# message is processed across the whole routing path. This option
3526# defines the maximum hop counts until reaching the target.
3527#
3528# Possible values:
3529#
3530# * Positive integer value
3531# (integer value)
3532# This option is deprecated for removal since 16.0.0.
3533# Its value may be silently ignored in the future.
3534# Reason: Cells v1 is being replaced with Cells v2.
3535#max_hop_count = 10
3536
3537# DEPRECATED:
3538# Cells scheduler.
3539#
3540# The class of the driver used by the cells scheduler. This should be
3541# the full Python path to the class to be used. If nothing is specified
3542# in this option, the CellsScheduler is used.
3543# (string value)
3544# This option is deprecated for removal since 16.0.0.
3545# Its value may be silently ignored in the future.
3546# Reason: Cells v1 is being replaced with Cells v2.
3547#scheduler = nova.cells.scheduler.CellsScheduler
3548
3549# DEPRECATED:
3550# RPC driver queue base.
3551#
3552# When sending a message to another cell by JSON-ifying the message
3553# and making an RPC cast to 'process_message', a base queue is used.
3554# This option defines the base queue name to be used when communicating
3555# between cells. Various topics by message type will be appended to this.
3556#
3557# Possible values:
3558#
3559# * The base queue name to be used when communicating between cells.
3560# (string value)
3561# This option is deprecated for removal since 16.0.0.
3562# Its value may be silently ignored in the future.
3563# Reason: Cells v1 is being replaced with Cells v2.
3564#rpc_driver_queue_base = cells.intercell
3565
3566# DEPRECATED:
3567# Scheduler filter classes.
3568#
3569# Filter classes the cells scheduler should use. An entry of
3570# "nova.cells.filters.all_filters" maps to all cells filters
3571# included with nova. As of the Mitaka release the following
3572# filter classes are available:
3573#
3574# Different cell filter: A scheduler hint of 'different_cell'
3575# with a value of a full cell name may be specified to route
3576# a build away from a particular cell.
3577#
3578# Image properties filter: Image metadata named
3579# 'hypervisor_version_requires' with a version specification
3580# may be specified to ensure the build goes to a cell which
3581# has hypervisors of the required version. If either the version
3582# requirement on the image or the hypervisor capability of the
3583# cell is not present, this filter returns without filtering out
3584# the cells.
3585#
3586# Target cell filter: A scheduler hint of 'target_cell' with a
3587# value of a full cell name may be specified to route a build to
3588# a particular cell. No error handling is done as there's no way
3589# to know whether the full path is valid.
3590#
3591# As an admin user, you can also add a filter that directs builds
3592# to a particular cell.
3593#
3594# (list value)
3595# This option is deprecated for removal since 16.0.0.
3596# Its value may be silently ignored in the future.
3597# Reason: Cells v1 is being replaced with Cells v2.
3598#scheduler_filter_classes = nova.cells.filters.all_filters
3599
3600# DEPRECATED:
3601# Scheduler weight classes.
3602#
3603# Weigher classes the cells scheduler should use. An entry of
3604# "nova.cells.weights.all_weighers" maps to all cell weighers
3605# included with nova. As of the Mitaka release the following
3606# weight classes are available:
3607#
3608# mute_child: Downgrades the likelihood of child cells being
3609# chosen for scheduling requests, which haven't sent capacity
3610# or capability updates in a while. Options include
3611# mute_weight_multiplier (multiplier for mute children; value
3612# should be negative).
3613#
3614# ram_by_instance_type: Select cells with the most RAM capacity
3615# for the instance type being requested. Because higher weights
3616# win, Compute returns the number of available units for the
3617# instance type requested. The ram_weight_multiplier option defaults
3618# to 10.0 that adds to the weight by a factor of 10. Use a negative
3619# number to stack VMs on one host instead of spreading out new VMs
3620# to more hosts in the cell.
3621#
3622# weight_offset: Allows modifying the database to weight a particular
3623# cell. The highest weight will be the first cell to be scheduled for
3624# launching an instance. When the weight_offset of a cell is set to 0,
3625# it is unlikely to be picked but it could be picked if other cells
3626# have a lower weight, like if they're full. And when the weight_offset
3627# is set to a very high value (for example, '999999999999999'), it is
3628# likely to be picked if other cells do not have a higher weight.
3629# (list value)
3630# This option is deprecated for removal since 16.0.0.
3631# Its value may be silently ignored in the future.
3632# Reason: Cells v1 is being replaced with Cells v2.
3633#scheduler_weight_classes = nova.cells.weights.all_weighers
3634
3635# DEPRECATED:
3636# Scheduler retries.
3637#
3638# How many retries when no cells are available. Specifies how many
3639# times the scheduler tries to launch a new instance when no cells
3640# are available.
3641#
3642# Possible values:
3643#
3644# * Positive integer value
3645#
3646# Related options:
3647#
3648# * This value is used with the ``scheduler_retry_delay`` value
3649# while retrying to find a suitable cell.
3650# (integer value)
3651# This option is deprecated for removal since 16.0.0.
3652# Its value may be silently ignored in the future.
3653# Reason: Cells v1 is being replaced with Cells v2.
3654#scheduler_retries = 10
3655
3656# DEPRECATED:
3657# Scheduler retry delay.
3658#
3659# Specifies the delay (in seconds) between scheduling retries when no
3660# cell can be found to place the new instance on. When the instance
3661# could not be scheduled to a cell after ``scheduler_retries`` in
3662# combination with ``scheduler_retry_delay``, then the scheduling
3663# of the instance failed.
3664#
3665# Possible values:
3666#
3667# * Time in seconds.
3668#
3669# Related options:
3670#
3671# * This value is used with the ``scheduler_retries`` value
3672# while retrying to find a suitable cell.
3673# (integer value)
3674# This option is deprecated for removal since 16.0.0.
3675# Its value may be silently ignored in the future.
3676# Reason: Cells v1 is being replaced with Cells v2.
3677#scheduler_retry_delay = 2
3678
3679# DEPRECATED:
3680# DB check interval.
3681#
3682# Cell state manager updates cell status for all cells from the DB
3683# only after this particular interval time is passed. Otherwise cached
3684# statuses are used. If this value is 0 or negative, all cell statuses
3685# are updated from the DB whenever a state is needed.
3686#
3687# Possible values:
3688#
3689# * Interval time, in seconds.
3690#
3691# (integer value)
3692# This option is deprecated for removal since 16.0.0.
3693# Its value may be silently ignored in the future.
3694# Reason: Cells v1 is being replaced with Cells v2.
3695#db_check_interval = 60
3696
3697# DEPRECATED:
3698# Optional cells configuration.
3699#
3700# Configuration file from which to read cells configuration. If given,
3701# overrides reading cells from the database.
3702#
3703# Cells store all inter-cell communication data, including user names
3704# and passwords, in the database. Because the cells data is not updated
3705# very frequently, use this option to specify a JSON file to store
3706# cells data. With this configuration, the database is no longer
3707# consulted when reloading the cells data. The file must have columns
3708# present in the Cell model (excluding common database fields and the
3709# id column). You must specify the queue connection information through
3710# a transport_url field, instead of username, password, and so on.
3711#
3712# The transport_url has the following form:
3713# rabbit://USERNAME:PASSWORD@HOSTNAME:PORT/VIRTUAL_HOST
3714#
3715# Possible values:
3716#
3717# The scheme can be either qpid or rabbit, the following sample shows
3718# this optional configuration::
3719#
3720# {
3721# "parent": {
3722# "name": "parent",
3723# "api_url": "http://api.example.com:8774",
3724# "transport_url": "rabbit://rabbit.example.com",
3725# "weight_offset": 0.0,
3726# "weight_scale": 1.0,
3727# "is_parent": true
3728# },
3729# "cell1": {
3730# "name": "cell1",
3731# "api_url": "http://api.example.com:8774",
3732# "transport_url": "rabbit://rabbit1.example.com",
3733# "weight_offset": 0.0,
3734# "weight_scale": 1.0,
3735# "is_parent": false
3736# },
3737# "cell2": {
3738# "name": "cell2",
3739# "api_url": "http://api.example.com:8774",
3740# "transport_url": "rabbit://rabbit2.example.com",
3741# "weight_offset": 0.0,
3742# "weight_scale": 1.0,
3743# "is_parent": false
3744# }
3745# }
3746# (string value)
3747# This option is deprecated for removal since 16.0.0.
3748# Its value may be silently ignored in the future.
3749# Reason: Cells v1 is being replaced with Cells v2.
3750#cells_config = <None>
3751
3752
3753[cinder]
3754
3755#
3756# From nova.conf
3757#
3758
3759#
3760# Info to match when looking for cinder in the service catalog.
3761#
3762# Possible values:
3763#
3764# * Format is separated values of the form:
3765# <service_type>:<service_name>:<endpoint_type>
3766#
3767# Note: Nova does not support the Cinder v2 API since the Nova 17.0.0 Queens
3768# release.
3769#
3770# Related options:
3771#
3772# * endpoint_template - Setting this option will override catalog_info
3773# (string value)
3774catalog_info = volumev3:cinderv3:internalURL
3775
3776#
3777# If this option is set then it will override service catalog lookup with
3778# this template for cinder endpoint
3779#
3780# Possible values:
3781#
3782# * URL for cinder endpoint API
3783# e.g. http://localhost:8776/v3/%(project_id)s
3784#
3785# Note: Nova does not support the Cinder v2 API since the Nova 17.0.0 Queens
3786# release.
3787#
3788# Related options:
3789#
3790# * catalog_info - If endpoint_template is not set, catalog_info will be used.
3791# (string value)
3792#endpoint_template = <None>
3793
3794#
3795# Region name of this node. This is used when picking the URL in the service
3796# catalog.
3797#
3798# Possible values:
3799#
3800# * Any string representing region name
3801# (string value)
3802os_region_name = {{ compute.identity.region }}
3803
3804#
3805# Number of times cinderclient should retry on any failed http call.
3806# 0 means connection is attempted only once. Setting it to any positive integer
3807# means that on failure connection is retried that many times e.g. setting it
3808# to 3 means total attempts to connect will be 4.
3809#
3810# Possible values:
3811#
3812# * Any integer value. 0 means connection is attempted only once
3813# (integer value)
3814# Minimum value: 0
3815#http_retries = 3
3816
3817#
3818# Allow attach between instance and volume in different availability zones.
3819#
3820# If False, volumes attached to an instance must be in the same availability
3821# zone in Cinder as the instance availability zone in Nova.
3822# This also means care should be taken when booting an instance from a volume
3823# where source is not "volume" because Nova will attempt to create a volume
3824# using
3825# the same availability zone as what is assigned to the instance.
3826# If that AZ is not in Cinder (or allow_availability_zone_fallback=False in
3827# cinder.conf), the volume create request will fail and the instance will fail
3828# the build request.
3829# By default there is no availability zone restriction on volume attach.
3830# (boolean value)
3831#cross_az_attach = true
3832{%- if compute.cross_az_attach is defined %}
3833cross_az_attach = {{ compute.cross_az_attach }}
3834{%- endif %}
3835
3836{%- set _data = compute.get('cinder', compute.get('identity', {})) %}
3837{%- set auth_type = _data.get('auth_type', 'password') %}
3838{%- if 'cacert_file' not in _data.keys() %}{% do _data.update({'cacert_file': compute.cacert_file}) %}{% endif %}
3839{%- include "oslo_templates/files/" ~ compute.version ~ "/keystoneauth/_type_" ~ auth_type ~ ".conf" %}
3840
3841
3842[compute]
3843
3844#
3845# From nova.conf
3846#
3847
3848#
3849# Enables reporting of build failures to the scheduler.
3850#
3851# Any nonzero value will enable sending build failure statistics to the
3852# scheduler for use by the BuildFailureWeigher.
3853#
3854# Possible values:
3855#
3856# * Any positive integer enables reporting build failures.
3857# * Zero to disable reporting build failures.
3858#
3859# Related options:
3860#
3861# * [filter_scheduler]/build_failure_weight_multiplier
3862#
3863# (integer value)
3864#consecutive_build_service_disable_threshold = 10
3865{%- if compute.get('compute', {}).consecutive_build_service_disable_threshold is defined %}
3866consecutive_build_service_disable_threshold = {{ compute.compute.consecutive_build_service_disable_threshold }}
3867{%- endif %}
3868
3869#
3870# Time to wait in seconds before resending an ACPI shutdown signal to
3871# instances.
3872#
3873# The overall time to wait is set by ``shutdown_timeout``.
3874#
3875# Possible values:
3876#
3877# * Any integer greater than 0 in seconds
3878#
3879# Related options:
3880#
3881# * ``shutdown_timeout``
3882# (integer value)
3883# Minimum value: 1
3884#shutdown_retry_interval = 10
3885
3886#
3887# Interval for updating nova-compute-side cache of the compute node resource
3888# provider's aggregates and traits info.
3889#
3890# This option specifies the number of seconds between attempts to update a
3891# provider's aggregates and traits information in the local cache of the compute
3892# node.
3893#
3894# Possible values:
3895#
3896# * Any positive integer in seconds.
3897# (integer value)
3898# Minimum value: 1
3899#resource_provider_association_refresh = 300
3900
3901#
3902# Defines which physical CPUs (pCPUs) will be used for best-effort guest vCPU
3903# resources.
3904#
3905# Currently only used by libvirt driver to place guest emulator threads when
3906# hw:emulator_threads_policy:share.
3907#
3908# ::
3909# cpu_shared_set = "4-12,^8,15"
3910# (string value)
3911#cpu_shared_set = <None>
3912
3913#
3914# Determine if the source compute host should wait for a ``network-vif-plugged``
3915# event from the (neutron) networking service before starting the actual
3916# transfer
3917# of the guest to the destination compute host.
3918#
3919# Note that this option is read on the destination host of a live migration.
3920# If you set this option the same on all of your compute hosts, which you should
3921# do if you use the same networking backend universally, you do not have to
3922# worry about this.
3923#
3924# Before starting the transfer of the guest, some setup occurs on the
3925# destination
3926# compute host, including plugging virtual interfaces. Depending on the
3927# networking backend **on the destination host**, a ``network-vif-plugged``
3928# event may be triggered and then received on the source compute host and the
3929# source compute can wait for that event to ensure networking is set up on the
3930# destination host before starting the guest transfer in the hypervisor.
3931#
3932# By default, this is False for two reasons:
3933#
3934# 1. Backward compatibility: deployments should test this out and ensure it
3935# works
3936# for them before enabling it.
3937#
3938# 2. The compute service cannot reliably determine which types of virtual
3939# interfaces (``port.binding:vif_type``) will send ``network-vif-plugged``
3940# events without an accompanying port ``binding:host_id`` change.
3941# Open vSwitch and linuxbridge should be OK, but OpenDaylight is at least
3942# one known backend that will not currently work in this case, see bug
3943# https://launchpad.net/bugs/1755890 for more details.
3944#
3945# Possible values:
3946#
3947# * True: wait for ``network-vif-plugged`` events before starting guest transfer
3948# * False: do not wait for ``network-vif-plugged`` events before starting guest
3949# transfer (this is how things have always worked before this option
3950# was introduced)
3951#
3952# Related options:
3953#
3954# * [DEFAULT]/vif_plugging_is_fatal: if ``live_migration_wait_for_vif_plug`` is
3955# True and ``vif_plugging_timeout`` is greater than 0, and a timeout is
3956# reached, the live migration process will fail with an error but the guest
3957# transfer will not have started to the destination host
3958# * [DEFAULT]/vif_plugging_timeout: if ``live_migration_wait_for_vif_plug`` is
3959# True, this controls the amount of time to wait before timing out and either
3960# failing if ``vif_plugging_is_fatal`` is True, or simply continuing with the
3961# live migration
3962# (boolean value)
3963#live_migration_wait_for_vif_plug = false
3964{%- if pillar.get('neutron', {}).get('compute', {}).get('backend', {}).get('mechanism', {}).get('ovs', {}).get('driver', '') == 'openvswitch' %}
3965live_migration_wait_for_vif_plug = true
3966{%- endif %}
3967
3968
3969[conductor]
3970#
3971# Options under this group are used to define Conductor's communication,
3972# which manager should act as a proxy between computes and database,
3973# and finally, how many worker processes will be used.
3974
3975#
3976# From nova.conf
3977#
3978
3979#
3980# Number of workers for OpenStack Conductor service. The default will be the
3981# number of CPUs available.
3982# (integer value)
3983#workers = <None>
3984
3985
3986[console]
3987#
3988# Options under this group allow to tune the configuration of the console proxy
3989# service.
3990#
3991# Note: in configuration of every compute is a ``console_host`` option,
3992# which allows to select the console proxy service to connect to.
3993
3994#
3995# From nova.conf
3996#
3997
3998#
3999# Adds list of allowed origins to the console websocket proxy to allow
4000# connections from other origin hostnames.
4001# Websocket proxy matches the host header with the origin header to
4002# prevent cross-site requests. This list specifies the values, other
4003# than host, that are allowed in the origin header.
4004#
4005# Possible values:
4006#
4007# * A list where each element is an allowed origin hostnames, else an empty list
4008# (list value)
4009# Deprecated group/name - [DEFAULT]/console_allowed_origins
4010#allowed_origins =
4011
4012
4013[consoleauth]
4014
4015#
4016# From nova.conf
4017#
4018
4019#
4020# The lifetime of a console auth token (in seconds).
4021#
4022# A console auth token is used in authorizing console access for a user.
4023# Once the auth token time to live count has elapsed, the token is
4024# considered expired. Expired tokens are then deleted.
4025#
4026# Related options:
4027#
4028# * ``[workarounds]/enable_consoleauth``
4029# (integer value)
4030# Minimum value: 0
4031# Deprecated group/name - [DEFAULT]/console_token_ttl
4032#token_ttl = 600
4033{% if compute.consoleauth_token_ttl is defined %}
4034token_ttl = {{ compute.consoleauth_token_ttl }}
4035{%- elif compute.get('consoleauth', {}).token_ttl is defined %}
4036token_ttl = {{ compute.consoleauth.token_ttl }}
4037{% endif %}
4038
4039
4040[cors]
4041
4042#
4043# From oslo.middleware
4044#
4045
4046# Indicate whether this resource may be shared with the domain received in the
4047# requests "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing
4048# slash. Example: https://horizon.example.com (list value)
4049#allowed_origin = <None>
4050
4051# Indicate that the actual request can include user credentials (boolean value)
4052#allow_credentials = true
4053
4054# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
4055# Headers. (list value)
4056#expose_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,X-Service-Token
4057
4058# Maximum cache age of CORS preflight requests. (integer value)
4059#max_age = 3600
4060
4061# Indicate which methods can be used during the actual request. (list value)
4062#allow_methods = GET,PUT,POST,DELETE,PATCH
4063
4064# Indicate which header field names may be used during the actual request. (list
4065# value)
4066#allow_headers = X-Auth-Token,X-Openstack-Request-Id,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id
4067
4068
4069[database]
4070{%- set _data = compute.database %}
4071{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': compute.cacert_file}) %}{% endif %}
4072{%- include "oslo_templates/files/" ~ compute.version ~ "/oslo/_database.conf" %}
4073
4074
4075[devices]
4076
4077#
4078# From nova.conf
4079#
4080
4081#
4082# The vGPU types enabled in the compute node.
4083#
4084# Some pGPUs (e.g. NVIDIA GRID K1) support different vGPU types. User can use
4085# this option to specify a list of enabled vGPU types that may be assigned to a
4086# guest instance. But please note that Nova only supports a single type in the
4087# Queens release. If more than one vGPU type is specified (as a comma-separated
4088# list), only the first one will be used. An example is as the following::
4089#
4090# [devices]
4091# enabled_vgpu_types = GRID K100,Intel GVT-g,MxGPU.2,nvidia-11
4092# (list value)
4093#enabled_vgpu_types =
Ivan Berezovskiyeff62782019-09-25 16:29:47 +04004094{%- if compute.get('devices', {}).enabled_vgpu_types is defined %}
4095enabled_vgpu_types = {{ compute.devices.enabled_vgpu_types }}
4096{% endif %}
Michael Polenchukf37e5b62018-11-28 17:55:45 +04004097
4098
4099[ephemeral_storage_encryption]
4100
4101#
4102# From nova.conf
4103#
4104
4105#
4106# Enables/disables LVM ephemeral storage encryption.
4107# (boolean value)
4108#enabled = false
4109
4110#
4111# Cipher-mode string to be used.
4112#
4113# The cipher and mode to be used to encrypt ephemeral storage. The set of
4114# cipher-mode combinations available depends on kernel support. According
4115# to the dm-crypt documentation, the cipher is expected to be in the format:
4116# "<cipher>-<chainmode>-<ivmode>".
4117#
4118# Possible values:
4119#
4120# * Any crypto option listed in ``/proc/crypto``.
4121# (string value)
4122#cipher = aes-xts-plain64
4123
4124#
4125# Encryption key length in bits.
4126#
4127# The bit length of the encryption key to be used to encrypt ephemeral storage.
4128# In XTS mode only half of the bits are used for encryption key.
4129# (integer value)
4130# Minimum value: 1
4131#key_size = 512
4132
4133
4134[filter_scheduler]
4135
4136#
4137# From nova.conf
4138#
4139
4140#
4141# Size of subset of best hosts selected by scheduler.
4142#
4143# New instances will be scheduled on a host chosen randomly from a subset of the
4144# N best hosts, where N is the value set by this option.
4145#
4146# Setting this to a value greater than 1 will reduce the chance that multiple
4147# scheduler processes handling similar requests will select the same host,
4148# creating a potential race condition. By selecting a host randomly from the N
4149# hosts that best fit the request, the chance of a conflict is reduced. However,
4150# the higher you set this value, the less optimal the chosen host may be for a
4151# given request.
4152#
4153# This option is only used by the FilterScheduler and its subclasses; if you use
4154# a different scheduler, this option has no effect.
4155#
4156# Possible values:
4157#
4158# * An integer, where the integer corresponds to the size of a host subset. Any
4159# integer is valid, although any value less than 1 will be treated as 1
4160# (integer value)
4161# Minimum value: 1
4162# Deprecated group/name - [DEFAULT]/scheduler_host_subset_size
4163#host_subset_size = 1
4164
4165#
4166# The number of instances that can be actively performing IO on a host.
4167#
4168# Instances performing IO includes those in the following states: build, resize,
4169# snapshot, migrate, rescue, unshelve.
4170#
4171# This option is only used by the FilterScheduler and its subclasses; if you use
4172# a different scheduler, this option has no effect. Also note that this setting
4173# only affects scheduling if the 'io_ops_filter' filter is enabled.
4174#
4175# Possible values:
4176#
4177# * An integer, where the integer corresponds to the max number of instances
4178# that can be actively performing IO on any given host.
4179# (integer value)
4180#max_io_ops_per_host = 8
4181
4182#
4183# Maximum number of instances that can be active on a host.
4184#
4185# If you need to limit the number of instances on any given host, set this
4186# option
4187# to the maximum number of instances you want to allow. The NumInstancesFilter
4188# and AggregateNumInstancesFilter will reject any host that has at least as many
4189# instances as this option's value.
4190#
4191# This option is only used by the FilterScheduler and its subclasses; if you use
4192# a different scheduler, this option has no effect. Also note that this setting
4193# only affects scheduling if the 'NumInstancesFilter' or
4194# 'AggregateNumInstancesFilter' filter is enabled.
4195#
4196# Possible values:
4197#
4198# * An integer, where the integer corresponds to the max instances that can be
4199# scheduled on a host.
4200# (integer value)
4201# Minimum value: 1
4202#max_instances_per_host = 50
4203
4204#
4205# Enable querying of individual hosts for instance information.
4206#
4207# The scheduler may need information about the instances on a host in order to
4208# evaluate its filters and weighers. The most common need for this information
4209# is
4210# for the (anti-)affinity filters, which need to choose a host based on the
4211# instances already running on a host.
4212#
4213# If the configured filters and weighers do not need this information, disabling
4214# this option will improve performance. It may also be disabled when the
4215# tracking
4216# overhead proves too heavy, although this will cause classes requiring host
4217# usage data to query the database on each request instead.
4218#
4219# This option is only used by the FilterScheduler and its subclasses; if you use
4220# a different scheduler, this option has no effect.
4221#
4222# NOTE: In a multi-cell (v2) setup where the cell MQ is separated from the
4223# top-level, computes cannot directly communicate with the scheduler. Thus,
4224# this option cannot be enabled in that scenario. See also the
4225# [workarounds]/disable_group_policy_check_upcall option.
4226# (boolean value)
4227# Deprecated group/name - [DEFAULT]/scheduler_tracks_instance_changes
4228#track_instance_changes = true
4229
4230#
4231# Filters that the scheduler can use.
4232#
4233# An unordered list of the filter classes the nova scheduler may apply. Only
4234# the
4235# filters specified in the 'enabled_filters' option will be used, but
4236# any filter appearing in that option must also be included in this list.
4237#
4238# By default, this is set to all filters that are included with nova.
4239#
4240# This option is only used by the FilterScheduler and its subclasses; if you use
4241# a different scheduler, this option has no effect.
4242#
4243# Possible values:
4244#
4245# * A list of zero or more strings, where each string corresponds to the name of
4246# a filter that may be used for selecting a host
4247#
4248# Related options:
4249#
4250# * enabled_filters
4251# (multi valued)
4252# Deprecated group/name - [DEFAULT]/scheduler_available_filters
4253#available_filters = nova.scheduler.filters.all_filters
4254
4255#
4256# Filters that the scheduler will use.
4257#
4258# An ordered list of filter class names that will be used for filtering
4259# hosts. These filters will be applied in the order they are listed so
4260# place your most restrictive filters first to make the filtering process more
4261# efficient.
4262#
4263# This option is only used by the FilterScheduler and its subclasses; if you use
4264# a different scheduler, this option has no effect.
4265#
4266# Possible values:
4267#
4268# * A list of zero or more strings, where each string corresponds to the name of
4269# a filter to be used for selecting a host
4270#
4271# Related options:
4272#
4273# * All of the filters in this option *must* be present in the
4274# 'available_filters' option, or a SchedulerHostFilterNotFound
4275# exception will be raised.
4276# (list value)
4277# Deprecated group/name - [DEFAULT]/scheduler_default_filters
4278#enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
4279
4280#
4281# Weighers that the scheduler will use.
4282#
4283# Only hosts which pass the filters are weighed. The weight for any host starts
4284# at 0, and the weighers order these hosts by adding to or subtracting from the
4285# weight assigned by the previous weigher. Weights may become negative. An
4286# instance will be scheduled to one of the N most-weighted hosts, where N is
4287# 'scheduler_host_subset_size'.
4288#
4289# By default, this is set to all weighers that are included with Nova.
4290#
4291# This option is only used by the FilterScheduler and its subclasses; if you use
4292# a different scheduler, this option has no effect.
4293#
4294# Possible values:
4295#
4296# * A list of zero or more strings, where each string corresponds to the name of
4297# a weigher that will be used for selecting a host
4298# (list value)
4299# Deprecated group/name - [DEFAULT]/scheduler_weight_classes
4300#weight_classes = nova.scheduler.weights.all_weighers
4301
4302#
4303# RAM weight multiplier ratio.
4304#
4305# This option determines how hosts with more or less available RAM are weighed.
4306# A
4307# positive value will result in the scheduler preferring hosts with more
4308# available RAM, and a negative number will result in the scheduler preferring
4309# hosts with less available RAM. Another way to look at it is that positive
4310# values for this option will tend to spread instances across many hosts, while
4311# negative values will tend to fill up (stack) hosts as much as possible before
4312# scheduling to a less-used host. The absolute value, whether positive or
4313# negative, controls how strong the RAM weigher is relative to other weighers.
4314#
4315# This option is only used by the FilterScheduler and its subclasses; if you use
4316# a different scheduler, this option has no effect. Also note that this setting
4317# only affects scheduling if the 'ram' weigher is enabled.
4318#
4319# Possible values:
4320#
4321# * An integer or float value, where the value corresponds to the multiplier
4322# ratio for this weigher.
4323# (floating point value)
4324#ram_weight_multiplier = 1.0
4325
4326#
4327# CPU weight multiplier ratio.
4328#
4329# Multiplier used for weighting free vCPUs. Negative numbers indicate stacking
4330# rather than spreading.
4331#
4332# This option is only used by the FilterScheduler and its subclasses; if you use
4333# a different scheduler, this option has no effect. Also note that this setting
4334# only affects scheduling if the 'cpu' weigher is enabled.
4335#
4336# Possible values:
4337#
4338# * An integer or float value, where the value corresponds to the multiplier
4339# ratio for this weigher.
4340#
4341# Related options:
4342#
4343# * ``filter_scheduler.weight_classes``: This weigher must be added to list of
4344# enabled weight classes if the ``weight_classes`` setting is set to a
4345# non-default value.
4346# (floating point value)
4347#cpu_weight_multiplier = 1.0
4348
4349#
4350# Disk weight multiplier ratio.
4351#
4352# Multiplier used for weighing free disk space. Negative numbers mean to
4353# stack vs spread.
4354#
4355# This option is only used by the FilterScheduler and its subclasses; if you use
4356# a different scheduler, this option has no effect. Also note that this setting
4357# only affects scheduling if the 'disk' weigher is enabled.
4358#
4359# Possible values:
4360#
4361# * An integer or float value, where the value corresponds to the multiplier
4362# ratio for this weigher.
4363# (floating point value)
4364#disk_weight_multiplier = 1.0
4365
4366#
4367# IO operations weight multiplier ratio.
4368#
4369# This option determines how hosts with differing workloads are weighed.
4370# Negative
4371# values, such as the default, will result in the scheduler preferring hosts
4372# with
4373# lighter workloads whereas positive values will prefer hosts with heavier
4374# workloads. Another way to look at it is that positive values for this option
4375# will tend to schedule instances onto hosts that are already busy, while
4376# negative values will tend to distribute the workload across more hosts. The
4377# absolute value, whether positive or negative, controls how strong the io_ops
4378# weigher is relative to other weighers.
4379#
4380# This option is only used by the FilterScheduler and its subclasses; if you use
4381# a different scheduler, this option has no effect. Also note that this setting
4382# only affects scheduling if the 'io_ops' weigher is enabled.
4383#
4384# Possible values:
4385#
4386# * An integer or float value, where the value corresponds to the multiplier
4387# ratio for this weigher.
4388# (floating point value)
4389#io_ops_weight_multiplier = -1.0
4390
4391#
4392# PCI device affinity weight multiplier.
4393#
4394# The PCI device affinity weigher computes a weighting based on the number of
4395# PCI devices on the host and the number of PCI devices requested by the
4396# instance. The ``NUMATopologyFilter`` filter must be enabled for this to have
4397# any significance. For more information, refer to the filter documentation:
4398#
4399# https://docs.openstack.org/nova/latest/user/filter-scheduler.html
4400#
4401# Possible values:
4402#
4403# * A positive integer or float value, where the value corresponds to the
4404# multiplier ratio for this weigher.
4405# (floating point value)
4406# Minimum value: 0
4407#pci_weight_multiplier = 1.0
4408
4409#
4410# Multiplier used for weighing hosts for group soft-affinity.
4411#
4412# Possible values:
4413#
4414# * An integer or float value, where the value corresponds to weight multiplier
4415# for hosts with group soft affinity. Only positive values are meaningful, as
4416# negative values would make this behave as a soft anti-affinity weigher.
4417# (floating point value)
4418#soft_affinity_weight_multiplier = 1.0
4419
4420#
4421# Multiplier used for weighing hosts for group soft-anti-affinity.
4422#
4423# Possible values:
4424#
4425# * An integer or float value, where the value corresponds to weight multiplier
4426# for hosts with group soft anti-affinity. Only positive values are
4427# meaningful, as negative values would make this behave as a soft affinity
4428# weigher.
4429# (floating point value)
4430#soft_anti_affinity_weight_multiplier = 1.0
4431
4432#
4433# Multiplier used for weighing hosts that have had recent build failures.
4434#
4435# This option determines how much weight is placed on a compute node with
4436# recent build failures. Build failures may indicate a failing, misconfigured,
4437# or otherwise ailing compute node, and avoiding it during scheduling may be
4438# beneficial. The weight is inversely proportional to the number of recent
4439# build failures the compute node has experienced. This value should be
4440# set to some high value to offset weight given by other enabled weighers
4441# due to available resources. To disable weighing compute hosts by the
4442# number of recent failures, set this to zero.
4443#
4444# This option is only used by the FilterScheduler and its subclasses; if you use
4445# a different scheduler, this option has no effect.
4446#
4447# Possible values:
4448#
4449# * An integer or float value, where the value corresponds to the multiplier
4450# ratio for this weigher.
4451#
4452# Related options:
4453#
4454# * [compute]/consecutive_build_service_disable_threshold - Must be nonzero
4455# for a compute to report data considered by this weigher.
4456# (floating point value)
4457#build_failure_weight_multiplier = 1000000.0
4458
4459#
4460# Enable spreading the instances between hosts with the same best weight.
4461#
4462# Enabling it is beneficial for cases when host_subset_size is 1
4463# (default), but there is a large number of hosts with same maximal weight.
4464# This scenario is common in Ironic deployments where there are typically many
4465# baremetal nodes with identical weights returned to the scheduler.
4466# In such case enabling this option will reduce contention and chances for
4467# rescheduling events.
4468# At the same time it will make the instance packing (even in unweighed case)
4469# less dense.
4470# (boolean value)
4471#shuffle_best_same_weighed_hosts = false
4472
4473#
4474# The default architecture to be used when using the image properties filter.
4475#
4476# When using the ImagePropertiesFilter, it is possible that you want to define
4477# a default architecture to make the user experience easier and avoid having
4478# something like x86_64 images landing on aarch64 compute nodes because the
4479# user did not specify the 'hw_architecture' property in Glance.
4480#
4481# Possible values:
4482#
4483# * CPU Architectures such as x86_64, aarch64, s390x.
4484# (string value)
4485# Possible values:
4486# alpha - <No description provided>
4487# armv6 - <No description provided>
4488# armv7l - <No description provided>
4489# armv7b - <No description provided>
4490# aarch64 - <No description provided>
4491# cris - <No description provided>
4492# i686 - <No description provided>
4493# ia64 - <No description provided>
4494# lm32 - <No description provided>
4495# m68k - <No description provided>
4496# microblaze - <No description provided>
4497# microblazeel - <No description provided>
4498# mips - <No description provided>
4499# mipsel - <No description provided>
4500# mips64 - <No description provided>
4501# mips64el - <No description provided>
4502# openrisc - <No description provided>
4503# parisc - <No description provided>
4504# parisc64 - <No description provided>
4505# ppc - <No description provided>
4506# ppcle - <No description provided>
4507# ppc64 - <No description provided>
4508# ppc64le - <No description provided>
4509# ppcemb - <No description provided>
4510# s390 - <No description provided>
4511# s390x - <No description provided>
4512# sh4 - <No description provided>
4513# sh4eb - <No description provided>
4514# sparc - <No description provided>
4515# sparc64 - <No description provided>
4516# unicore32 - <No description provided>
4517# x86_64 - <No description provided>
4518# xtensa - <No description provided>
4519# xtensaeb - <No description provided>
4520#image_properties_default_architecture = <None>
4521
4522#
4523# List of UUIDs for images that can only be run on certain hosts.
4524#
4525# If there is a need to restrict some images to only run on certain designated
4526# hosts, list those image UUIDs here.
4527#
4528# This option is only used by the FilterScheduler and its subclasses; if you use
4529# a different scheduler, this option has no effect. Also note that this setting
4530# only affects scheduling if the 'IsolatedHostsFilter' filter is enabled.
4531#
4532# Possible values:
4533#
4534# * A list of UUID strings, where each string corresponds to the UUID of an
4535# image
4536#
4537# Related options:
4538#
4539# * scheduler/isolated_hosts
4540# * scheduler/restrict_isolated_hosts_to_isolated_images
4541# (list value)
4542#isolated_images =
4543
4544#
4545# List of hosts that can only run certain images.
4546#
4547# If there is a need to restrict some images to only run on certain designated
4548# hosts, list those host names here.
4549#
4550# This option is only used by the FilterScheduler and its subclasses; if you use
4551# a different scheduler, this option has no effect. Also note that this setting
4552# only affects scheduling if the 'IsolatedHostsFilter' filter is enabled.
4553#
4554# Possible values:
4555#
4556# * A list of strings, where each string corresponds to the name of a host
4557#
4558# Related options:
4559#
4560# * scheduler/isolated_images
4561# * scheduler/restrict_isolated_hosts_to_isolated_images
4562# (list value)
4563#isolated_hosts =
4564
4565#
4566# Prevent non-isolated images from being built on isolated hosts.
4567#
4568# This option is only used by the FilterScheduler and its subclasses; if you use
4569# a different scheduler, this option has no effect. Also note that this setting
4570# only affects scheduling if the 'IsolatedHostsFilter' filter is enabled. Even
4571# then, this option doesn't affect the behavior of requests for isolated images,
4572# which will *always* be restricted to isolated hosts.
4573#
4574# Related options:
4575#
4576# * scheduler/isolated_images
4577# * scheduler/isolated_hosts
4578# (boolean value)
4579#restrict_isolated_hosts_to_isolated_images = true
4580
4581#
4582# Image property namespace for use in the host aggregate.
4583#
4584# Images and hosts can be configured so that certain images can only be
4585# scheduled
4586# to hosts in a particular aggregate. This is done with metadata values set on
4587# the host aggregate that are identified by beginning with the value of this
4588# option. If the host is part of an aggregate with such a metadata key, the
4589# image
4590# in the request spec must have the value of that metadata in its properties in
4591# order for the scheduler to consider the host as acceptable.
4592#
4593# This option is only used by the FilterScheduler and its subclasses; if you use
4594# a different scheduler, this option has no effect. Also note that this setting
4595# only affects scheduling if the 'aggregate_image_properties_isolation' filter
4596# is
4597# enabled.
4598#
4599# Possible values:
4600#
4601# * A string, where the string corresponds to an image property namespace
4602#
4603# Related options:
4604#
4605# * aggregate_image_properties_isolation_separator
4606# (string value)
4607#aggregate_image_properties_isolation_namespace = <None>
4608
4609#
4610# Separator character(s) for image property namespace and name.
4611#
4612# When using the aggregate_image_properties_isolation filter, the relevant
4613# metadata keys are prefixed with the namespace defined in the
4614# aggregate_image_properties_isolation_namespace configuration option plus a
4615# separator. This option defines the separator to be used.
4616#
4617# This option is only used by the FilterScheduler and its subclasses; if you use
4618# a different scheduler, this option has no effect. Also note that this setting
4619# only affects scheduling if the 'aggregate_image_properties_isolation' filter
4620# is enabled.
4621#
4622# Possible values:
4623#
4624# * A string, where the string corresponds to an image property namespace
4625# separator character
4626#
4627# Related options:
4628#
4629# * aggregate_image_properties_isolation_namespace
4630# (string value)
4631#aggregate_image_properties_isolation_separator = .
4632
4633
4634[glance]
4635# Configuration options for the Image service
4636
4637#
4638# From nova.conf
4639#
4640
4641#
4642# List of glance api servers endpoints available to nova.
4643#
4644# https is used for ssl-based glance api servers.
4645#
4646# NOTE: The preferred mechanism for endpoint discovery is via keystoneauth1
4647# loading options. Only use api_servers if you need multiple endpoints and are
4648# unable to use a load balancer for some reason.
4649#
4650# Possible values:
4651#
4652# * A list of any fully qualified url of the form
4653# "scheme://hostname:port[/path]"
4654# (i.e. "http://10.0.1.0:9292" or "https://my.glance.server/image").
4655# (list value)
4656#api_servers = <None>
4657{%- if compute.image is defined %}
4658api_servers = {{ compute.image.get('protocol', 'http') }}://{{ compute.image.host }}:{{ compute.image.get('port', 9292) }}
4659{% endif %}
4660
4661#
4662# Enable glance operation retries.
4663#
4664# Specifies the number of retries when uploading / downloading
4665# an image to / from glance. 0 means no retries.
4666# (integer value)
4667# Minimum value: 0
4668#num_retries = 0
4669
4670# DEPRECATED:
4671# List of url schemes that can be directly accessed.
4672#
4673# This option specifies a list of url schemes that can be downloaded
4674# directly via the direct_url. This direct_URL can be fetched from
4675# Image metadata which can be used by nova to get the
4676# image more efficiently. nova-compute could benefit from this by
4677# invoking a copy when it has access to the same file system as glance.
4678#
4679# Possible values:
4680#
4681# * [file], Empty list (default)
4682# (list value)
4683# This option is deprecated for removal since 17.0.0.
4684# Its value may be silently ignored in the future.
4685# Reason:
4686# This was originally added for the 'nova.image.download.file' FileTransfer
4687# extension which was removed in the 16.0.0 Pike release. The
4688# 'nova.image.download.modules' extension point is not maintained
4689# and there is no indication of its use in production clouds.
4690#allowed_direct_url_schemes =
4691
4692#
4693# Enable image signature verification.
4694#
4695# nova uses the image signature metadata from glance and verifies the signature
4696# of a signed image while downloading that image. If the image signature cannot
4697# be verified or if the image signature metadata is either incomplete or
4698# unavailable, then nova will not boot the image and instead will place the
4699# instance into an error state. This provides end users with stronger assurances
4700# of the integrity of the image data they are using to create servers.
4701#
4702# Related options:
4703#
4704# * The options in the `key_manager` group, as the key_manager is used
4705# for the signature validation.
4706# * Both enable_certificate_validation and default_trusted_certificate_ids
4707# below depend on this option being enabled.
4708# (boolean value)
4709{%- if compute.get('image', {}).verify_glance_signatures is defined %}
4710verify_glance_signatures = {{ compute.image.verify_glance_signatures }}
4711{%- elif compute.get('barbican', {}).get('enabled', False) %}
4712verify_glance_signatures = true
4713{%- else %}
4714#verify_glance_signatures = false
4715{%- endif %}
4716
4717# DEPRECATED:
4718# Enable certificate validation for image signature verification.
4719#
4720# During image signature verification nova will first verify the validity of the
4721# image's signing certificate using the set of trusted certificates associated
4722# with the instance. If certificate validation fails, signature verification
4723# will not be performed and the instance will be placed into an error state.
4724# This
4725# provides end users with stronger assurances that the image data is unmodified
4726# and trustworthy. If left disabled, image signature verification can still
4727# occur but the end user will not have any assurance that the signing
4728# certificate used to generate the image signature is still trustworthy.
4729#
4730# Related options:
4731#
4732# * This option only takes effect if verify_glance_signatures is enabled.
4733# * The value of default_trusted_certificate_ids may be used when this option
4734# is enabled.
4735# (boolean value)
4736# This option is deprecated for removal since 16.0.0.
4737# Its value may be silently ignored in the future.
4738# Reason:
4739# This option is intended to ease the transition for deployments leveraging
4740# image signature verification. The intended state long-term is for signature
4741# verification and certificate validation to always happen together.
4742#enable_certificate_validation = false
4743
4744#
4745# List of certificate IDs for certificates that should be trusted.
4746#
4747# May be used as a default list of trusted certificate IDs for certificate
4748# validation. The value of this option will be ignored if the user provides a
4749# list of trusted certificate IDs with an instance API request. The value of
4750# this option will be persisted with the instance data if signature verification
4751# and certificate validation are enabled and if the user did not provide an
4752# alternative list. If left empty when certificate validation is enabled the
4753# user must provide a list of trusted certificate IDs otherwise certificate
4754# validation will fail.
4755#
4756# Related options:
4757#
4758# * The value of this option may be used if both verify_glance_signatures and
4759# enable_certificate_validation are enabled.
4760# (list value)
4761#default_trusted_certificate_ids =
4762
4763# Enable or disable debug logging with glanceclient. (boolean value)
4764#debug = false
4765
4766# PEM encoded Certificate Authority to use when verifying HTTPs connections.
4767# (string value)
4768#cafile = <None>
4769
4770# PEM encoded client certificate cert file (string value)
4771#certfile = <None>
4772
4773# PEM encoded client certificate key file (string value)
4774#keyfile = <None>
4775
4776# Verify HTTPS connections. (boolean value)
4777#insecure = false
4778
4779# Timeout value for http requests (integer value)
4780#timeout = <None>
4781
4782# Collect per-API call timing information. (boolean value)
4783#collect_timing = false
4784
4785# Log requests to multiple loggers. (boolean value)
4786#split_loggers = false
4787
4788# The default service_type for endpoint URL discovery. (string value)
4789#service_type = image
4790
4791# The default service_name for endpoint URL discovery. (string value)
4792#service_name = <None>
4793
4794# List of interfaces, in order of preference, for endpoint URL. (list value)
4795#valid_interfaces = internal,public
4796
4797# The default region_name for endpoint URL discovery. (string value)
4798#region_name = <None>
4799
4800# Always use this endpoint URL for requests for this client. NOTE: The
4801# unversioned endpoint should be specified here; to request a particular API
4802# version, use the `version`, `min-version`, and/or `max-version` options.
4803# (string value)
4804#endpoint_override = <None>
4805
4806
4807[guestfs]
4808#
4809# libguestfs is a set of tools for accessing and modifying virtual
4810# machine (VM) disk images. You can use this for viewing and editing
4811# files inside guests, scripting changes to VMs, monitoring disk
4812# used/free statistics, creating guests, P2V, V2V, performing backups,
4813# cloning VMs, building VMs, formatting disks and resizing disks.
4814
4815#
4816# From nova.conf
4817#
4818
4819#
4820# Enable/disables guestfs logging.
4821#
4822# This configures guestfs to debug messages and push them to OpenStack
4823# logging system. When set to True, it traces libguestfs API calls and
4824# enable verbose debug messages. In order to use the above feature,
4825# "libguestfs" package must be installed.
4826#
4827# Related options:
4828#
4829# Since libguestfs access and modifies VM's managed by libvirt, below options
4830# should be set to give access to those VM's.
4831#
4832# * ``libvirt.inject_key``
4833# * ``libvirt.inject_partition``
4834# * ``libvirt.inject_password``
4835# (boolean value)
4836#debug = false
4837
4838
4839[healthcheck]
4840
4841#
4842# From oslo.middleware
4843#
4844
4845# DEPRECATED: The path to respond to healthcheck requests on. (string value)
4846# This option is deprecated for removal.
4847# Its value may be silently ignored in the future.
4848#path = /healthcheck
4849
4850# Show more detailed information as part of the response (boolean value)
4851#detailed = false
4852
4853# Additional backends that can perform health checks and report that information
4854# back as part of a request. (list value)
4855#backends =
4856
4857# Check the presence of a file to determine if an application is running on a
4858# port. Used by DisableByFileHealthcheck plugin. (string value)
4859#disable_by_file_path = <None>
4860
4861# Check the presence of a file based on a port to determine if an application is
4862# running on a port. Expects a "port:path" list of strings. Used by
4863# DisableByFilesPortsHealthcheck plugin. (list value)
4864#disable_by_file_paths =
4865
4866
4867[hyperv]
4868#
4869# The hyperv feature allows you to configure the Hyper-V hypervisor
4870# driver to be used within an OpenStack deployment.
4871
4872#
4873# From nova.conf
4874#
4875
4876#
4877# Dynamic memory ratio
4878#
4879# Enables dynamic memory allocation (ballooning) when set to a value
4880# greater than 1. The value expresses the ratio between the total RAM
4881# assigned to an instance and its startup RAM amount. For example a
4882# ratio of 2.0 for an instance with 1024MB of RAM implies 512MB of
4883# RAM allocated at startup.
4884#
4885# Possible values:
4886#
4887# * 1.0: Disables dynamic memory allocation (Default).
4888# * Float values greater than 1.0: Enables allocation of total implied
4889# RAM divided by this value for startup.
4890# (floating point value)
4891#dynamic_memory_ratio = 1.0
4892
4893#
4894# Enable instance metrics collection
4895#
4896# Enables metrics collections for an instance by using Hyper-V's
4897# metric APIs. Collected data can be retrieved by other apps and
4898# services, e.g.: Ceilometer.
4899# (boolean value)
4900#enable_instance_metrics_collection = false
4901
4902#
4903# Instances path share
4904#
4905# The name of a Windows share mapped to the "instances_path" dir
4906# and used by the resize feature to copy files to the target host.
4907# If left blank, an administrative share (hidden network share) will
4908# be used, looking for the same "instances_path" used locally.
4909#
4910# Possible values:
4911#
4912# * "": An administrative share will be used (Default).
4913# * Name of a Windows share.
4914#
4915# Related options:
4916#
4917# * "instances_path": The directory which will be used if this option
4918# here is left blank.
4919# (string value)
4920#instances_path_share =
4921
4922#
4923# Limit CPU features
4924#
4925# This flag is needed to support live migration to hosts with
4926# different CPU features and checked during instance creation
4927# in order to limit the CPU features used by the instance.
4928# (boolean value)
4929#limit_cpu_features = false
4930
4931#
4932# Mounted disk query retry count
4933#
4934# The number of times to retry checking for a mounted disk.
4935# The query runs until the device can be found or the retry
4936# count is reached.
4937#
4938# Possible values:
4939#
4940# * Positive integer values. Values greater than 1 is recommended
4941# (Default: 10).
4942#
4943# Related options:
4944#
4945# * Time interval between disk mount retries is declared with
4946# "mounted_disk_query_retry_interval" option.
4947# (integer value)
4948# Minimum value: 0
4949#mounted_disk_query_retry_count = 10
4950
4951#
4952# Mounted disk query retry interval
4953#
4954# Interval between checks for a mounted disk, in seconds.
4955#
4956# Possible values:
4957#
4958# * Time in seconds (Default: 5).
4959#
4960# Related options:
4961#
4962# * This option is meaningful when the mounted_disk_query_retry_count
4963# is greater than 1.
4964# * The retry loop runs with mounted_disk_query_retry_count and
4965# mounted_disk_query_retry_interval configuration options.
4966# (integer value)
4967# Minimum value: 0
4968#mounted_disk_query_retry_interval = 5
4969
4970#
4971# Power state check timeframe
4972#
4973# The timeframe to be checked for instance power state changes.
4974# This option is used to fetch the state of the instance from Hyper-V
4975# through the WMI interface, within the specified timeframe.
4976#
4977# Possible values:
4978#
4979# * Timeframe in seconds (Default: 60).
4980# (integer value)
4981# Minimum value: 0
4982#power_state_check_timeframe = 60
4983
4984#
4985# Power state event polling interval
4986#
4987# Instance power state change event polling frequency. Sets the
4988# listener interval for power state events to the given value.
4989# This option enhances the internal lifecycle notifications of
4990# instances that reboot themselves. It is unlikely that an operator
4991# has to change this value.
4992#
4993# Possible values:
4994#
4995# * Time in seconds (Default: 2).
4996# (integer value)
4997# Minimum value: 0
4998#power_state_event_polling_interval = 2
4999
5000#
5001# qemu-img command
5002#
5003# qemu-img is required for some of the image related operations
5004# like converting between different image types. You can get it
5005# from here: (http://qemu.weilnetz.de/) or you can install the
5006# Cloudbase OpenStack Hyper-V Compute Driver
5007# (https://cloudbase.it/openstack-hyperv-driver/) which automatically
5008# sets the proper path for this config option. You can either give the
5009# full path of qemu-img.exe or set its path in the PATH environment
5010# variable and leave this option to the default value.
5011#
5012# Possible values:
5013#
5014# * Name of the qemu-img executable, in case it is in the same
5015# directory as the nova-compute service or its path is in the
5016# PATH environment variable (Default).
5017# * Path of qemu-img command (DRIVELETTER:\PATH\TO\QEMU-IMG\COMMAND).
5018#
5019# Related options:
5020#
5021# * If the config_drive_cdrom option is False, qemu-img will be used to
5022# convert the ISO to a VHD, otherwise the configuration drive will
5023# remain an ISO. To use configuration drive with Hyper-V, you must
5024# set the mkisofs_cmd value to the full path to an mkisofs.exe
5025# installation.
5026# (string value)
5027#qemu_img_cmd = qemu-img.exe
5028
5029#
5030# External virtual switch name
5031#
5032# The Hyper-V Virtual Switch is a software-based layer-2 Ethernet
5033# network switch that is available with the installation of the
5034# Hyper-V server role. The switch includes programmatically managed
5035# and extensible capabilities to connect virtual machines to both
5036# virtual networks and the physical network. In addition, Hyper-V
5037# Virtual Switch provides policy enforcement for security, isolation,
5038# and service levels. The vSwitch represented by this config option
5039# must be an external one (not internal or private).
5040#
5041# Possible values:
5042#
5043# * If not provided, the first of a list of available vswitches
5044# is used. This list is queried using WQL.
5045# * Virtual switch name.
5046# (string value)
5047#vswitch_name = <None>
5048
5049#
5050# Wait soft reboot seconds
5051#
5052# Number of seconds to wait for instance to shut down after soft
5053# reboot request is made. We fall back to hard reboot if instance
5054# does not shutdown within this window.
5055#
5056# Possible values:
5057#
5058# * Time in seconds (Default: 60).
5059# (integer value)
5060# Minimum value: 0
5061#wait_soft_reboot_seconds = 60
5062
5063#
5064# Configuration drive cdrom
5065#
5066# OpenStack can be configured to write instance metadata to
5067# a configuration drive, which is then attached to the
5068# instance before it boots. The configuration drive can be
5069# attached as a disk drive (default) or as a CD drive.
5070#
5071# Possible values:
5072#
5073# * True: Attach the configuration drive image as a CD drive.
5074# * False: Attach the configuration drive image as a disk drive (Default).
5075#
5076# Related options:
5077#
5078# * This option is meaningful with force_config_drive option set to 'True'
5079# or when the REST API call to create an instance will have
5080# '--config-drive=True' flag.
5081# * config_drive_format option must be set to 'iso9660' in order to use
5082# CD drive as the configuration drive image.
5083# * To use configuration drive with Hyper-V, you must set the
5084# mkisofs_cmd value to the full path to an mkisofs.exe installation.
5085# Additionally, you must set the qemu_img_cmd value to the full path
5086# to an qemu-img command installation.
5087# * You can configure the Compute service to always create a configuration
5088# drive by setting the force_config_drive option to 'True'.
5089# (boolean value)
5090config_drive_cdrom = {{ compute.get('config_drive', {}).get('cdrom', False)|lower }}
5091
5092#
5093# Configuration drive inject password
5094#
5095# Enables setting the admin password in the configuration drive image.
5096#
5097# Related options:
5098#
5099# * This option is meaningful when used with other options that enable
5100# configuration drive usage with Hyper-V, such as force_config_drive.
5101# * Currently, the only accepted config_drive_format is 'iso9660'.
5102# (boolean value)
5103config_drive_inject_password = {{ compute.get('config_drive', {}).get('inject_password', False)|lower }}
5104
5105#
5106# Volume attach retry count
5107#
5108# The number of times to retry attaching a volume. Volume attachment
5109# is retried until success or the given retry count is reached.
5110#
5111# Possible values:
5112#
5113# * Positive integer values (Default: 10).
5114#
5115# Related options:
5116#
5117# * Time interval between attachment attempts is declared with
5118# volume_attach_retry_interval option.
5119# (integer value)
5120# Minimum value: 0
5121#volume_attach_retry_count = 10
5122
5123#
5124# Volume attach retry interval
5125#
5126# Interval between volume attachment attempts, in seconds.
5127#
5128# Possible values:
5129#
5130# * Time in seconds (Default: 5).
5131#
5132# Related options:
5133#
5134# * This options is meaningful when volume_attach_retry_count
5135# is greater than 1.
5136# * The retry loop runs with volume_attach_retry_count and
5137# volume_attach_retry_interval configuration options.
5138# (integer value)
5139# Minimum value: 0
5140#volume_attach_retry_interval = 5
5141
5142#
5143# Enable RemoteFX feature
5144#
5145# This requires at least one DirectX 11 capable graphics adapter for
5146# Windows / Hyper-V Server 2012 R2 or newer and RDS-Virtualization
5147# feature has to be enabled.
5148#
5149# Instances with RemoteFX can be requested with the following flavor
5150# extra specs:
5151#
5152# **os:resolution**. Guest VM screen resolution size. Acceptable values::
5153#
5154# 1024x768, 1280x1024, 1600x1200, 1920x1200, 2560x1600, 3840x2160
5155#
5156# ``3840x2160`` is only available on Windows / Hyper-V Server 2016.
5157#
5158# **os:monitors**. Guest VM number of monitors. Acceptable values::
5159#
5160# [1, 4] - Windows / Hyper-V Server 2012 R2
5161# [1, 8] - Windows / Hyper-V Server 2016
5162#
5163# **os:vram**. Guest VM VRAM amount. Only available on
5164# Windows / Hyper-V Server 2016. Acceptable values::
5165#
5166# 64, 128, 256, 512, 1024
5167# (boolean value)
5168#enable_remotefx = false
5169
5170#
5171# Use multipath connections when attaching iSCSI or FC disks.
5172#
5173# This requires the Multipath IO Windows feature to be enabled. MPIO must be
5174# configured to claim such devices.
5175# (boolean value)
5176#use_multipath_io = false
5177
5178#
5179# List of iSCSI initiators that will be used for establishing iSCSI sessions.
5180#
5181# If none are specified, the Microsoft iSCSI initiator service will choose the
5182# initiator.
5183# (list value)
5184#iscsi_initiator_list =
5185
5186{% if compute.ironic is defined -%}
5187[ironic]
5188#
5189# Configuration options for Ironic driver (Bare Metal).
5190# If using the Ironic driver following options must be set:
5191# * auth_type
5192# * auth_url
5193# * project_name
5194# * username
5195# * password
5196# * project_domain_id or project_domain_name
5197# * user_domain_id or user_domain_name
5198
5199#
5200# From nova.conf
5201#
5202
5203# DEPRECATED: URL override for the Ironic API endpoint. (uri value)
5204# This option is deprecated for removal.
5205# Its value may be silently ignored in the future.
5206# Reason: Endpoint lookup uses the service catalog via common keystoneauth1
5207# Adapter configuration options. In the current release, api_endpoint will
5208# override this behavior, but will be ignored and/or removed in a future
5209# release. To achieve the same result, use the endpoint_override option instead.
5210api_endpoint = {{ compute.ironic.get('protocol', 'http') }}://{{ compute.ironic.host }}:{{ compute.ironic.port }}
5211
5212#
5213# The number of times to retry when a request conflicts.
5214# If set to 0, only try once, no retries.
5215#
5216# Related options:
5217#
5218# * api_retry_interval
5219# (integer value)
5220# Minimum value: 0
5221#api_max_retries = 60
5222
5223#
5224# The number of seconds to wait before retrying the request.
5225#
5226# Related options:
5227#
5228# * api_max_retries
5229# (integer value)
5230# Minimum value: 0
5231#api_retry_interval = 2
5232
5233# Timeout (seconds) to wait for node serial console state changed. Set to 0 to
5234# disable timeout. (integer value)
5235# Minimum value: 0
5236#serial_console_state_timeout = 10
5237
5238# PEM encoded Certificate Authority to use when verifying HTTPs connections.
5239# (string value)
5240#cafile = <None>
5241{%- if compute.ironic.get('protocol', 'http') == 'https' %}
5242cafile = {{ compute.identity.get('cacert_file', compute.cacert_file) }}
5243{%- endif %}
5244
5245# PEM encoded client certificate cert file (string value)
5246#certfile = <None>
5247
5248# PEM encoded client certificate key file (string value)
5249#keyfile = <None>
5250
5251# Verify HTTPS connections. (boolean value)
5252#insecure = false
5253
5254# Timeout value for http requests (integer value)
5255#timeout = <None>
5256
5257# Collect per-API call timing information. (boolean value)
5258#collect_timing = false
5259
5260# Log requests to multiple loggers. (boolean value)
5261#split_loggers = false
5262
5263# Authentication type to load (string value)
5264# Deprecated group/name - [ironic]/auth_plugin
5265auth_type = {{ compute.ironic.auth_type }}
5266
5267# Config Section from which to load plugin specific options (string value)
5268#auth_section = <None>
5269
5270# Authentication URL (string value)
5271auth_url = {{ compute.identity.get('protocol', 'http') }}://{{ compute.identity.host }}:{{ compute.identity.port }}/v3
5272
5273# Scope for system operations (string value)
5274#system_scope = <None>
5275
5276# Domain ID to scope to (string value)
5277#domain_id = <None>
5278
5279# Domain name to scope to (string value)
5280#domain_name = <None>
5281
5282# Project ID to scope to (string value)
5283#project_id = <None>
5284
5285# Project name to scope to (string value)
5286project_name = {{ compute.identity.tenant }}
5287
5288# Domain ID containing project (string value)
5289#project_domain_id = <None>
5290
5291# Domain name containing project (string value)
5292project_domain_name = {{ compute.ironic.project_domain_name }}
5293
5294# Trust ID (string value)
5295#trust_id = <None>
5296
5297# User ID (string value)
5298#user_id = <None>
5299
5300# Username (string value)
5301# Deprecated group/name - [ironic]/user_name
5302username = {{ compute.ironic.user }}
5303
5304# User's domain id (string value)
5305#user_domain_id = <None>
5306
5307# User's domain name (string value)
5308user_domain_name = {{ compute.ironic.user_domain_name }}
5309
5310# User's password (string value)
5311password = {{ compute.ironic.password }}
5312
5313# The default service_type for endpoint URL discovery. (string value)
5314#service_type = baremetal
5315
5316# The default service_name for endpoint URL discovery. (string value)
5317#service_name = <None>
5318
5319# List of interfaces, in order of preference, for endpoint URL. (list value)
5320#valid_interfaces = internal,public
5321
5322# The default region_name for endpoint URL discovery. (string value)
5323#region_name = <None>
5324
5325# Always use this endpoint URL for requests for this client. NOTE: The
5326# unversioned endpoint should be specified here; to request a particular API
5327# version, use the `version`, `min-version`, and/or `max-version` options.
5328# (string value)
5329# Deprecated group/name - [ironic]/api_endpoint
5330#endpoint_override = <None>
5331{%- endif %}
5332
5333
5334[key_manager]
5335
5336#
5337# From nova.conf
5338#
5339
5340#
5341# Fixed key returned by key manager, specified in hex.
5342#
5343# Possible values:
5344#
5345# * Empty string or a key in hex value
5346# (string value)
5347#fixed_key = <None>
5348
5349# Specify the key manager implementation. Options are "barbican" and "vault".
5350# Default is "barbican". Will support the values earlier set using
5351# [key_manager]/api_class for some time. (string value)
5352# Deprecated group/name - [key_manager]/api_class
5353#backend = barbican
5354
5355# The type of authentication credential to create. Possible values are 'token',
5356# 'password', 'keystone_token', and 'keystone_password'. Required if no context
5357# is passed to the credential factory. (string value)
5358#auth_type = <None>
5359
5360# Token for authentication. Required for 'token' and 'keystone_token' auth_type
5361# if no context is passed to the credential factory. (string value)
5362#token = <None>
5363
5364# Username for authentication. Required for 'password' auth_type. Optional for
5365# the 'keystone_password' auth_type. (string value)
5366#username = <None>
5367
5368# Password for authentication. Required for 'password' and 'keystone_password'
5369# auth_type. (string value)
5370#password = <None>
5371
5372# Use this endpoint to connect to Keystone. (string value)
5373#auth_url = <None>
5374
5375# User ID for authentication. Optional for 'keystone_token' and
5376# 'keystone_password' auth_type. (string value)
5377#user_id = <None>
5378
5379# User's domain ID for authentication. Optional for 'keystone_token' and
5380# 'keystone_password' auth_type. (string value)
5381#user_domain_id = <None>
5382
5383# User's domain name for authentication. Optional for 'keystone_token' and
5384# 'keystone_password' auth_type. (string value)
5385#user_domain_name = <None>
5386
5387# Trust ID for trust scoping. Optional for 'keystone_token' and
5388# 'keystone_password' auth_type. (string value)
5389#trust_id = <None>
5390
5391# Domain ID for domain scoping. Optional for 'keystone_token' and
5392# 'keystone_password' auth_type. (string value)
5393#domain_id = <None>
5394
5395# Domain name for domain scoping. Optional for 'keystone_token' and
5396# 'keystone_password' auth_type. (string value)
5397#domain_name = <None>
5398
5399# Project ID for project scoping. Optional for 'keystone_token' and
5400# 'keystone_password' auth_type. (string value)
5401#project_id = <None>
5402
5403# Project name for project scoping. Optional for 'keystone_token' and
5404# 'keystone_password' auth_type. (string value)
5405#project_name = <None>
5406
5407# Project's domain ID for project. Optional for 'keystone_token' and
5408# 'keystone_password' auth_type. (string value)
5409#project_domain_id = <None>
5410
5411# Project's domain name for project. Optional for 'keystone_token' and
5412# 'keystone_password' auth_type. (string value)
5413#project_domain_name = <None>
5414
5415# Allow fetching a new token if the current one is going to expire. Optional for
5416# 'keystone_token' and 'keystone_password' auth_type. (boolean value)
5417#reauthenticate = true
5418
5419
5420[keystone]
5421# Configuration options for the identity service
5422
5423#
5424# From nova.conf
5425#
5426
5427# PEM encoded Certificate Authority to use when verifying HTTPs connections.
5428# (string value)
5429#cafile = <None>
5430
5431# PEM encoded client certificate cert file (string value)
5432#certfile = <None>
5433
5434# PEM encoded client certificate key file (string value)
5435#keyfile = <None>
5436
5437# Verify HTTPS connections. (boolean value)
5438#insecure = false
5439
5440# Timeout value for http requests (integer value)
5441#timeout = <None>
5442
5443# Collect per-API call timing information. (boolean value)
5444#collect_timing = false
5445
5446# Log requests to multiple loggers. (boolean value)
5447#split_loggers = false
5448
5449# The default service_type for endpoint URL discovery. (string value)
5450#service_type = identity
5451
5452# The default service_name for endpoint URL discovery. (string value)
5453#service_name = <None>
5454
5455# List of interfaces, in order of preference, for endpoint URL. (list value)
5456#valid_interfaces = internal,public
5457
5458# The default region_name for endpoint URL discovery. (string value)
5459#region_name = <None>
5460
5461# Always use this endpoint URL for requests for this client. NOTE: The
5462# unversioned endpoint should be specified here; to request a particular API
5463# version, use the `version`, `min-version`, and/or `max-version` options.
5464# (string value)
5465#endpoint_override = <None>
5466
5467
5468[keystone_authtoken]
5469{%- set _data = compute.identity %}
5470{%- if 'cacert_file' not in _data.keys() %}{% do _data.update({'cacert_file': compute.cacert_file}) %}{% endif %}
5471{%- set auth_type = _data.get('auth_type', 'password') %}
5472{%- if compute.get('cache',{}).members is defined and 'cache' not in _data.keys() %}
5473{% do _data.update({'cache': compute.cache}) %}
5474{%- endif %}
5475{%- include "oslo_templates/files/" ~ compute.version ~ "/keystonemiddleware/_auth_token.conf" %}
5476{%- include "oslo_templates/files/" ~ compute.version ~ "/keystoneauth/_type_" ~ auth_type ~ ".conf" %}
5477
5478
5479[libvirt]
5480#
5481# Libvirt options allows cloud administrator to configure related
5482# libvirt hypervisor driver to be used within an OpenStack deployment.
5483#
5484# Almost all of the libvirt config options are influence by ``virt_type`` config
5485# which describes the virtualization type (or so called domain type) libvirt
5486# should use for specific features such as live migration, snapshot.
5487
5488#
5489# From nova.conf
5490#
5491
5492#
5493# The ID of the image to boot from to rescue data from a corrupted instance.
5494#
5495# If the rescue REST API operation doesn't provide an ID of an image to
5496# use, the image which is referenced by this ID is used. If this
5497# option is not set, the image from the instance is used.
5498#
5499# Possible values:
5500#
5501# * An ID of an image or nothing. If it points to an *Amazon Machine
5502# Image* (AMI), consider to set the config options ``rescue_kernel_id``
5503# and ``rescue_ramdisk_id`` too. If nothing is set, the image of the instance
5504# is used.
5505#
5506# Related options:
5507#
5508# * ``rescue_kernel_id``: If the chosen rescue image allows the separate
5509# definition of its kernel disk, the value of this option is used,
5510# if specified. This is the case when *Amazon*'s AMI/AKI/ARI image
5511# format is used for the rescue image.
5512# * ``rescue_ramdisk_id``: If the chosen rescue image allows the separate
5513# definition of its RAM disk, the value of this option is used if,
5514# specified. This is the case when *Amazon*'s AMI/AKI/ARI image
5515# format is used for the rescue image.
5516# (string value)
5517#rescue_image_id = <None>
5518
5519#
5520# The ID of the kernel (AKI) image to use with the rescue image.
5521#
5522# If the chosen rescue image allows the separate definition of its kernel
5523# disk, the value of this option is used, if specified. This is the case
5524# when *Amazon*'s AMI/AKI/ARI image format is used for the rescue image.
5525#
5526# Possible values:
5527#
5528# * An ID of an kernel image or nothing. If nothing is specified, the kernel
5529# disk from the instance is used if it was launched with one.
5530#
5531# Related options:
5532#
5533# * ``rescue_image_id``: If that option points to an image in *Amazon*'s
5534# AMI/AKI/ARI image format, it's useful to use ``rescue_kernel_id`` too.
5535# (string value)
5536#rescue_kernel_id = <None>
5537
5538#
5539# The ID of the RAM disk (ARI) image to use with the rescue image.
5540#
5541# If the chosen rescue image allows the separate definition of its RAM
5542# disk, the value of this option is used, if specified. This is the case
5543# when *Amazon*'s AMI/AKI/ARI image format is used for the rescue image.
5544#
5545# Possible values:
5546#
5547# * An ID of a RAM disk image or nothing. If nothing is specified, the RAM
5548# disk from the instance is used if it was launched with one.
5549#
5550# Related options:
5551#
5552# * ``rescue_image_id``: If that option points to an image in *Amazon*'s
5553# AMI/AKI/ARI image format, it's useful to use ``rescue_ramdisk_id`` too.
5554# (string value)
5555#rescue_ramdisk_id = <None>
5556
5557#
5558# Describes the virtualization type (or so called domain type) libvirt should
5559# use.
5560#
5561# The choice of this type must match the underlying virtualization strategy
5562# you have chosen for this host.
5563#
5564# Possible values:
5565#
5566# * See the predefined set of case-sensitive values.
5567#
5568# Related options:
5569#
5570# * ``connection_uri``: depends on this
5571# * ``disk_prefix``: depends on this
5572# * ``cpu_mode``: depends on this
5573# * ``cpu_model``: depends on this
5574# (string value)
5575# Possible values:
5576# kvm - <No description provided>
5577# lxc - <No description provided>
5578# qemu - <No description provided>
5579# uml - <No description provided>
5580# xen - <No description provided>
5581# parallels - <No description provided>
5582{%- if compute.libvirt.virt_type is defined %}
5583virt_type = {{ compute.libvirt.virt_type }}
5584{%- else %}
5585virt_type = kvm
5586{%- endif %}
5587
5588#
5589# Overrides the default libvirt URI of the chosen virtualization type.
5590#
5591# If set, Nova will use this URI to connect to libvirt.
5592#
5593# Possible values:
5594#
5595# * An URI like ``qemu:///system`` or ``xen+ssh://oirase/`` for example.
5596# This is only necessary if the URI differs to the commonly known URIs
5597# for the chosen virtualization type.
5598#
5599# Related options:
5600#
5601# * ``virt_type``: Influences what is used as default value here.
5602# (string value)
5603#connection_uri =
5604{%- if compute.get('libvirt', {}).uri is defined %}
5605connection_uri = {{ compute.libvirt.uri }}
5606{%- endif %}
5607
5608#
5609# Determines the way how the file system is chosen to inject data into it.
5610#
5611# *libguestfs* will be used a first solution to inject data. If that's not
5612# available on the host, the image will be locally mounted on the host as a
5613# fallback solution. If libguestfs is not able to determine the root partition
5614# (because there are more or less than one root partition) or cannot mount the
5615# file system, it will result in an error and the instance won't boot.
5616#
5617# Possible values:
5618#
5619# * -2 => disable the injection of data.
5620# * -1 => find the root partition with the file system to mount with libguestfs
5621# * 0 => The image is not partitioned
5622# * >0 => The number of the partition to use for the injection
5623#
5624# *Linux* distribution guest only.
5625#
5626# Related options:
5627#
5628# * ``inject_key``: If this option allows the injection of a SSH key it depends
5629# on value greater or equal to -1 for ``inject_partition``.
5630# * ``inject_password``: If this option allows the injection of an admin
5631# password
5632# it depends on value greater or equal to -1 for ``inject_partition``.
5633# * ``guestfs`` You can enable the debug log level of libguestfs with this
5634# config option. A more verbose output will help in debugging issues.
5635# * ``virt_type``: If you use ``lxc`` as virt_type it will be treated as a
5636# single partition image
5637# (integer value)
5638# Minimum value: -2
5639inject_partition = {{ compute.libvirt.inject_partition }}
5640
5641# DEPRECATED:
5642# Enable a mouse cursor within a graphical VNC or SPICE sessions.
5643#
5644# This will only be taken into account if the VM is fully virtualized and VNC
5645# and/or SPICE is enabled. If the node doesn't support a graphical framebuffer,
5646# then it is valid to set this to False.
5647#
5648# Related options:
5649#
5650# * ``[vnc]enabled``: If VNC is enabled, ``use_usb_tablet`` will have an effect.
5651# * ``[spice]enabled`` + ``[spice].agent_enabled``: If SPICE is enabled and the
5652# spice agent is disabled, the config value of ``use_usb_tablet`` will have
5653# an effect.
5654# (boolean value)
5655# This option is deprecated for removal since 14.0.0.
5656# Its value may be silently ignored in the future.
5657# Reason: This option is being replaced by the 'pointer_model' option.
5658#use_usb_tablet = true
5659
5660{%- if compute.get('ceph', {}).get('ephemeral', False) %}
5661images_type = rbd
5662
5663# The RADOS pool in which rbd volumes are stored (string value)
5664images_rbd_pool = {{ compute.ceph.rbd_pool }}
5665
5666# Path to the ceph configuration file to use (string value)
5667images_rbd_ceph_conf = /etc/ceph/ceph.conf
5668
5669#
5670# The RADOS client name for accessing rbd(RADOS Block Devices) volumes.
5671#
5672# Libvirt will refer to this user when connecting and authenticating with
5673# the Ceph RBD server.
5674# (string value)
5675rbd_user = {{ compute.ceph.rbd_user }}
5676
5677#
5678# The libvirt UUID of the secret for the rbd_user volumes.
5679# (string value)
5680rbd_secret_uuid = {{ compute.ceph.secret_uuid }}
5681
5682inject_password = false
5683inject_key = false
5684{%- elif compute.get('lvm', {}).get('ephemeral', False) %}
5685images_type = lvm
5686
5687#
5688# LVM Volume Group that is used for VM images, when you specify images_type=lvm
5689#
5690# Related options:
5691#
5692# * images_type
5693# (string value)
5694images_volume_group = {{ compute.lvm.images_volume_group }}
5695
5696{%- if compute.lvm.volume_clear is defined %}
5697#
5698# Method used to wipe ephemeral disks when they are deleted. Only takes effect
5699# if LVM is set as backing storage.
5700#
5701# Possible values:
5702#
5703# * none - do not wipe deleted volumes
5704# * zero - overwrite volumes with zeroes
5705# * shred - overwrite volume repeatedly
5706#
5707# Related options:
5708#
5709# * images_type - must be set to ``lvm``
5710# * volume_clear_size
5711# (string value)
5712# Possible values:
5713# none - <No description provided>
5714# zero - <No description provided>
5715# shred - <No description provided>
5716volume_clear = {{ compute.lvm.volume_clear }}
5717{%- endif %}
5718
5719{%- if compute.lvm.volume_clear_size is defined %}
5720#
5721# Size of area in MiB, counting from the beginning of the allocated volume,
5722# that will be cleared using method set in ``volume_clear`` option.
5723#
5724# Possible values:
5725#
5726# * 0 - clear whole volume
5727# * >0 - clear specified amount of MiB
5728#
5729# Related options:
5730#
5731# * images_type - must be set to ``lvm``
5732# * volume_clear - must be set and the value must be different than ``none``
5733# for this option to have any impact
5734# (integer value)
5735# Minimum value: 0
5736volume_clear_size = {{ compute.lvm.volume_clear_size }}
5737{%- endif %}
5738{%- else %}
5739#
5740# Allow the injection of an admin password for instance only at ``create`` and
5741# ``rebuild`` process.
5742#
5743# There is no agent needed within the image to do this. If *libguestfs* is
5744# available on the host, it will be used. Otherwise *nbd* is used. The file
5745# system of the image will be mounted and the admin password, which is provided
5746# in the REST API call will be injected as password for the root user. If no
5747# root user is available, the instance won't be launched and an error is thrown.
5748# Be aware that the injection is *not* possible when the instance gets launched
5749# from a volume.
5750#
5751# *Linux* distribution guest only.
5752#
5753# Possible values:
5754#
5755# * True: Allows the injection.
5756# * False: Disallows the injection. Any via the REST API provided admin password
5757# will be silently ignored.
5758#
5759# Related options:
5760#
5761# * ``inject_partition``: That option will decide about the discovery and usage
5762# of the file system. It also can disable the injection at all.
5763# (boolean value)
5764{%- if compute.libvirt.get('inject_partition', '-2')|string == '-2' %}
5765inject_password = false
5766{%- else %}
5767inject_password = {{ compute.libvirt.inject_password }}
5768{%- endif %}
5769
5770#
5771# Allow the injection of an SSH key at boot time.
5772#
5773# There is no agent needed within the image to do this. If *libguestfs* is
5774# available on the host, it will be used. Otherwise *nbd* is used. The file
5775# system of the image will be mounted and the SSH key, which is provided
5776# in the REST API call will be injected as SSH key for the root user and
5777# appended to the ``authorized_keys`` of that user. The SELinux context will
5778# be set if necessary. Be aware that the injection is *not* possible when the
5779# instance gets launched from a volume.
5780#
5781# This config option will enable directly modifying the instance disk and does
5782# not affect what cloud-init may do using data from config_drive option or the
5783# metadata service.
5784#
5785# *Linux* distribution guest only.
5786#
5787# Related options:
5788#
5789# * ``inject_partition``: That option will decide about the discovery and usage
5790# of the file system. It also can disable the injection at all.
5791# (boolean value)
5792inject_key = true
5793{%- endif %}
5794
5795#
5796# The IP address or hostname to be used as the target for live migration
5797# traffic.
5798#
5799# If this option is set to None, the hostname of the migration target compute
5800# node will be used.
5801#
5802# This option is useful in environments where the live-migration traffic can
5803# impact the network plane significantly. A separate network for live-migration
5804# traffic can then use this config option and avoids the impact on the
5805# management network.
5806#
5807# Possible values:
5808#
5809# * A valid IP address or hostname, else None.
5810#
5811# Related options:
5812#
5813# * ``live_migration_tunnelled``: The live_migration_inbound_addr value is
5814# ignored if tunneling is enabled.
5815# (string value)
5816#live_migration_inbound_addr = <None>
5817{%- if compute.libvirt.migration_inbound_addr is defined %}
5818live_migration_inbound_addr = {{ compute.libvirt.migration_inbound_addr }}
5819{%- endif %}
5820
5821# DEPRECATED:
5822# Live migration target URI to use.
5823#
5824# Override the default libvirt live migration target URI (which is dependent
5825# on virt_type). Any included "%s" is replaced with the migration target
5826# hostname.
5827#
5828# If this option is set to None (which is the default), Nova will automatically
5829# generate the `live_migration_uri` value based on only 4 supported `virt_type`
5830# in following list:
5831#
5832# * 'kvm': 'qemu+tcp://%s/system'
5833# * 'qemu': 'qemu+tcp://%s/system'
5834# * 'xen': 'xenmigr://%s/system'
5835# * 'parallels': 'parallels+tcp://%s/system'
5836#
5837# Related options:
5838#
5839# * ``live_migration_inbound_addr``: If ``live_migration_inbound_addr`` value
5840# is not None and ``live_migration_tunnelled`` is False, the ip/hostname
5841# address of target compute node is used instead of ``live_migration_uri`` as
5842# the uri for live migration.
5843# * ``live_migration_scheme``: If ``live_migration_uri`` is not set, the scheme
5844# used for live migration is taken from ``live_migration_scheme`` instead.
5845# (string value)
5846# This option is deprecated for removal since 15.0.0.
5847# Its value may be silently ignored in the future.
5848# Reason:
5849# live_migration_uri is deprecated for removal in favor of two other options
5850# that
5851# allow to change live migration scheme and target URI:
5852# ``live_migration_scheme``
5853# and ``live_migration_inbound_addr`` respectively.
5854#live_migration_uri = <None>
5855
5856#
5857# URI scheme used for live migration.
5858#
5859# Override the default libvirt live migration scheme (which is dependent on
5860# virt_type). If this option is set to None, nova will automatically choose a
5861# sensible default based on the hypervisor. It is not recommended that you
5862# change
5863# this unless you are very sure that hypervisor supports a particular scheme.
5864#
5865# Related options:
5866#
5867# * ``virt_type``: This option is meaningful only when ``virt_type`` is set to
5868# `kvm` or `qemu`.
5869# * ``live_migration_uri``: If ``live_migration_uri`` value is not None, the
5870# scheme used for live migration is taken from ``live_migration_uri`` instead.
5871# (string value)
5872{%- if compute.libvirt.tls.get('enabled', False) %}
5873live_migration_scheme = tls
5874{%- endif %}
5875
5876#
5877# Enable tunnelled migration.
5878#
5879# This option enables the tunnelled migration feature, where migration data is
5880# transported over the libvirtd connection. If enabled, we use the
5881# VIR_MIGRATE_TUNNELLED migration flag, avoiding the need to configure
5882# the network to allow direct hypervisor to hypervisor communication.
5883# If False, use the native transport. If not set, Nova will choose a
5884# sensible default based on, for example the availability of native
5885# encryption support in the hypervisor. Enabling this option will definitely
5886# impact performance massively.
5887#
5888# Note that this option is NOT compatible with use of block migration.
5889#
5890# Related options:
5891#
5892# * ``live_migration_inbound_addr``: The live_migration_inbound_addr value is
5893# ignored if tunneling is enabled.
5894# (boolean value)
5895#live_migration_tunnelled = false
5896{%- if compute.libvirt.live_migration_tunnelled is defined %}
5897live_migration_tunnelled = {{ compute.libvirt.live_migration_tunnelled }}
5898{%- endif %}
5899
5900#
5901# Maximum bandwidth(in MiB/s) to be used during migration.
5902#
5903# If set to 0, the hypervisor will choose a suitable default. Some hypervisors
5904# do not support this feature and will return an error if bandwidth is not 0.
5905# Please refer to the libvirt documentation for further details.
5906# (integer value)
5907#live_migration_bandwidth = 0
5908
5909#
5910# Maximum permitted downtime, in milliseconds, for live migration
5911# switchover.
5912#
5913# Will be rounded up to a minimum of 100ms. You can increase this value
5914# if you want to allow live-migrations to complete faster, or avoid
5915# live-migration timeout errors by allowing the guest to be paused for
5916# longer during the live-migration switch over.
5917#
5918# Related options:
5919#
5920# * live_migration_completion_timeout
5921# (integer value)
5922# Minimum value: 100
5923#live_migration_downtime = 500
5924
5925#
5926# Number of incremental steps to reach max downtime value.
5927#
5928# Will be rounded up to a minimum of 3 steps.
5929# (integer value)
5930# Minimum value: 3
5931#live_migration_downtime_steps = 10
5932
5933#
5934# Time to wait, in seconds, between each step increase of the migration
5935# downtime.
5936#
5937# Minimum delay is 3 seconds. Value is per GiB of guest RAM + disk to be
5938# transferred, with lower bound of a minimum of 2 GiB per device.
5939# (integer value)
5940# Minimum value: 3
5941#live_migration_downtime_delay = 75
5942
5943#
5944# Time to wait, in seconds, for migration to successfully complete transferring
5945# data before aborting the operation.
5946#
5947# Value is per GiB of guest RAM + disk to be transferred, with lower bound of
5948# a minimum of 2 GiB. Should usually be larger than downtime delay * downtime
5949# steps. Set to 0 to disable timeouts.
5950#
5951# Related options:
5952#
5953# * live_migration_downtime
5954# * live_migration_downtime_steps
5955# * live_migration_downtime_delay
5956# (integer value)
5957# Note: This option can be changed without restarting.
5958#live_migration_completion_timeout = 800
5959
5960# DEPRECATED:
5961# Time to wait, in seconds, for migration to make forward progress in
5962# transferring data before aborting the operation.
5963#
5964# Set to 0 to disable timeouts.
5965#
5966# This is deprecated, and now disabled by default because we have found serious
5967# bugs in this feature that caused false live-migration timeout failures. This
5968# feature will be removed or replaced in a future release.
5969# (integer value)
5970# Note: This option can be changed without restarting.
5971# This option is deprecated for removal.
5972# Its value may be silently ignored in the future.
5973# Reason: Serious bugs found in this feature.
5974#live_migration_progress_timeout = 0
5975
5976#
5977# This option allows nova to switch an on-going live migration to post-copy
5978# mode, i.e., switch the active VM to the one on the destination node before the
5979# migration is complete, therefore ensuring an upper bound on the memory that
5980# needs to be transferred. Post-copy requires libvirt>=1.3.3 and QEMU>=2.5.0.
5981#
5982# When permitted, post-copy mode will be automatically activated if a
5983# live-migration memory copy iteration does not make percentage increase of at
5984# least 10% over the last iteration.
5985#
5986# The live-migration force complete API also uses post-copy when permitted. If
5987# post-copy mode is not available, force complete falls back to pausing the VM
5988# to ensure the live-migration operation will complete.
5989#
5990# When using post-copy mode, if the source and destination hosts lose network
5991# connectivity, the VM being live-migrated will need to be rebooted. For more
5992# details, please see the Administration guide.
5993#
5994# Related options:
5995#
5996# * live_migration_permit_auto_converge
5997# (boolean value)
5998#live_migration_permit_post_copy = false
5999
6000#
6001# This option allows nova to start live migration with auto converge on.
6002#
6003# Auto converge throttles down CPU if a progress of on-going live migration
6004# is slow. Auto converge will only be used if this flag is set to True and
6005# post copy is not permitted or post copy is unavailable due to the version
6006# of libvirt and QEMU in use.
6007#
6008# Related options:
6009#
6010# * live_migration_permit_post_copy
6011# (boolean value)
6012#live_migration_permit_auto_converge = false
6013{%- if compute.libvirt.live_migration_permit_auto_converge is defined %}
6014live_migration_permit_auto_converge = {{ compute.libvirt.live_migration_permit_auto_converge|lower }}
6015{%- endif %}
6016
6017#
6018# Determine the snapshot image format when sending to the image service.
6019#
6020# If set, this decides what format is used when sending the snapshot to the
6021# image service.
6022# If not set, defaults to same type as source image.
6023#
6024# Possible values:
6025#
6026# * ``raw``: RAW disk format
6027# * ``qcow2``: KVM default disk format
6028# * ``vmdk``: VMWare default disk format
6029# * ``vdi``: VirtualBox default disk format
6030# * If not set, defaults to same type as source image.
6031# (string value)
6032# Possible values:
6033# raw - <No description provided>
6034# qcow2 - <No description provided>
6035# vmdk - <No description provided>
6036# vdi - <No description provided>
6037#snapshot_image_format = <None>
6038
6039#
6040# Override the default disk prefix for the devices attached to an instance.
6041#
6042# If set, this is used to identify a free disk device name for a bus.
6043#
6044# Possible values:
6045#
6046# * Any prefix which will result in a valid disk device name like 'sda' or 'hda'
6047# for example. This is only necessary if the device names differ to the
6048# commonly known device name prefixes for a virtualization type such as: sd,
6049# xvd, uvd, vd.
6050#
6051# Related options:
6052#
6053# * ``virt_type``: Influences which device type is used, which determines
6054# the default disk prefix.
6055# (string value)
6056#disk_prefix = <None>
6057
6058# Number of seconds to wait for instance to shut down after soft reboot request
6059# is made. We fall back to hard reboot if instance does not shut down within this
6060# window. (integer value)
6061#wait_soft_reboot_seconds = 120
6062
6063#
6064# Is used to set the CPU mode an instance should have.
6065#
6066# If virt_type="kvm|qemu", it will default to "host-model", otherwise it will
6067# default to "none".
6068#
6069# Possible values:
6070#
6071# * ``host-model``: Clones the host CPU feature flags
6072# * ``host-passthrough``: Use the host CPU model exactly
6073# * ``custom``: Use a named CPU model
6074# * ``none``: Don't set a specific CPU model. For instances with
6075# ``virt_type`` as KVM/QEMU, the default CPU model from QEMU will be used,
6076# which provides a basic set of CPU features that are compatible with most
6077# hosts.
6078#
6079# Related options:
6080#
6081# * ``cpu_model``: This should be set ONLY when ``cpu_mode`` is set to
6082# ``custom``. Otherwise, it would result in an error and the instance launch
6083# will fail.
6084# (string value)
6085# Possible values:
6086# host-model - <No description provided>
6087# host-passthrough - <No description provided>
6088# custom - <No description provided>
6089# none - <No description provided>
6090cpu_mode = {{ compute.cpu_mode }}
6091
6092#
6093# Set the name of the libvirt CPU model the instance should use.
6094#
6095# Possible values:
6096#
6097# * The named CPU models listed in ``/usr/share/libvirt/cpu_map.xml``
6098#
6099# Related options:
6100#
6101# * ``cpu_mode``: This should be set to ``custom`` ONLY when you want to
6102# configure (via ``cpu_model``) a specific named CPU model. Otherwise, it
6103# would result in an error and the instance launch will fail.
6104# * ``virt_type``: Only the virtualization types ``kvm`` and ``qemu`` use this.
6105# (string value)
6106#cpu_model = <None>
6107{%- if compute.get('libvirt', {}).cpu_model is defined and compute.cpu_mode == 'custom' %}
6108cpu_model = {{ compute.libvirt.cpu_model }}
6109{%- endif %}
6110
6111#
6112# This allows specifying granular CPU feature flags when configuring CPU
6113# models. For example, to explicitly specify the ``pcid``
6114# (Process-Context ID, an Intel processor feature -- which is now required
6115# to address the guest performance degradation as a result of applying the
6116# "Meltdown" CVE fixes to certain Intel CPU models) flag to the
6117# "IvyBridge" virtual CPU model::
6118#
6119# [libvirt]
6120# cpu_mode = custom
6121# cpu_model = IvyBridge
6122# cpu_model_extra_flags = pcid
6123#
6124# To specify multiple CPU flags (e.g. the Intel ``VMX`` to expose the
6125# virtualization extensions to the guest, or ``pdpe1gb`` to configure 1GB
6126# huge pages for CPU models that do not provide it)::
6127#
6128# [libvirt]
6129# cpu_mode = custom
6130# cpu_model = Haswell-noTSX-IBRS
6131# cpu_model_extra_flags = PCID, VMX, pdpe1gb
6132#
6133# As it can be noticed from above, the ``cpu_model_extra_flags`` config
6134# attribute is case insensitive. And specifying extra flags is valid in
6135# combination with all the three possible values for ``cpu_mode``:
6136# ``custom`` (this also requires an explicit ``cpu_model`` to be
6137# specified), ``host-model``, or ``host-passthrough``. A valid example
6138# for allowing extra CPU flags even for ``host-passthrough`` mode is that
6139# sometimes QEMU may disable certain CPU features -- e.g. Intel's
6140# "invtsc", Invariable Time Stamp Counter, CPU flag. And if you need to
6141# expose that CPU flag to the Nova instance, then you need to explicitly
6142# ask for it.
6143#
6144# The possible values for ``cpu_model_extra_flags`` depends on the CPU
6145# model in use. Refer to ``/usr/share/libvirt/cpu_map.xml`` possible CPU
6146# feature flags for a given CPU model.
6147#
6148# Note that when using this config attribute to set the 'PCID' CPU flag
6149# with the ``custom`` CPU mode, not all virtual (i.e. libvirt / QEMU) CPU
6150# models need it:
6151#
6152# * The only virtual CPU models that include the 'PCID' capability are
6153# Intel "Haswell", "Broadwell", and "Skylake" variants.
6154#
6155# * The libvirt / QEMU CPU models "Nehalem", "Westmere", "SandyBridge",
6156# and "IvyBridge" will _not_ expose the 'PCID' capability by default,
6157# even if the host CPUs by the same name include it. I.e. 'PCID' needs
6158# to be explicitly specified when using the said virtual CPU models.
6159#
6160# The libvirt driver's default CPU mode, ``host-model``, will do the right
6161# thing with respect to handling 'PCID' CPU flag for the guest --
6162# *assuming* you are running updated processor microcode, host and guest
6163# kernel, libvirt, and QEMU. The other mode, ``host-passthrough``, checks
6164# if 'PCID' is available in the hardware, and if so directly passes it
6165# through to the Nova guests. Thus, in context of 'PCID', with either of
6166# these CPU modes (``host-model`` or ``host-passthrough``), there is no
6167# need to use the ``cpu_model_extra_flags``.
6168#
6169# Related options:
6170#
6171# * cpu_mode
6172# * cpu_model
6173# (list value)
6174#cpu_model_extra_flags =
6175
6176# Location where libvirt driver will store snapshots before uploading them to
6177# image service (string value)
6178#snapshots_directory = $instances_path/snapshots
6179
6180# Location where the Xen hvmloader is kept (string value)
6181#xen_hvmloader_path = /usr/lib/xen/boot/hvmloader
6182
6183#
6184# Specific cache modes to use for different disk types.
6185#
6186# For example: file=directsync,block=none,network=writeback
6187#
6188# For local or direct-attached storage, it is recommended that you use
6189# writethrough (default) mode, as it ensures data integrity and has acceptable
6190# I/O performance for applications running in the guest, especially for read
6191# operations. However, caching mode none is recommended for remote NFS storage,
6192# because direct I/O operations (O_DIRECT) perform better than synchronous I/O
6193# operations (with O_SYNC). Caching mode none effectively turns all guest I/O
6194# operations into direct I/O operations on the host, which is the NFS client in
6195# this environment.
6196#
6197# Possible cache modes:
6198#
6199# * default: Same as writethrough.
6200# * none: With caching mode set to none, the host page cache is disabled, but
6201# the disk write cache is enabled for the guest. In this mode, the write
6202# performance in the guest is optimal because write operations bypass the host
6203# page cache and go directly to the disk write cache. If the disk write cache
6204# is battery-backed, or if the applications or storage stack in the guest
6205# transfer data properly (either through fsync operations or file system
6206# barriers), then data integrity can be ensured. However, because the host
6207# page cache is disabled, the read performance in the guest would not be as
6208# good as in the modes where the host page cache is enabled, such as
6209# writethrough mode. Shareable disk devices, like for a multi-attachable block
6210# storage volume, will have their cache mode set to 'none' regardless of
6211# configuration.
6212# * writethrough: writethrough mode is the default caching mode. With
6213# caching set to writethrough mode, the host page cache is enabled, but the
6214# disk write cache is disabled for the guest. Consequently, this caching mode
6215# ensures data integrity even if the applications and storage stack in the
6216# guest do not transfer data to permanent storage properly (either through
6217# fsync operations or file system barriers). Because the host page cache is
6218# enabled in this mode, the read performance for applications running in the
6219# guest is generally better. However, the write performance might be reduced
6220# because the disk write cache is disabled.
6221# * writeback: With caching set to writeback mode, both the host page cache
6222# and the disk write cache are enabled for the guest. Because of this, the
6223# I/O performance for applications running in the guest is good, but the data
6224# is not protected in a power failure. As a result, this caching mode is
6225# recommended only for temporary data where potential data loss is not a
6226# concern.
6227# NOTE: Certain backend disk mechanisms may provide safe writeback cache
6228# semantics. Specifically those that bypass the host page cache, such as
6229# QEMU's integrated RBD driver. Ceph documentation recommends setting this
6230# to writeback for maximum performance while maintaining data safety.
6231# * directsync: Like "writethrough", but it bypasses the host page cache.
6232# * unsafe: Caching mode of unsafe ignores cache transfer operations
6233# completely. As its name implies, this caching mode should be used only for
6234# temporary data where data loss is not a concern. This mode can be useful for
6235# speeding up guest installations, but you should switch to another caching
6236# mode in production environments.
6237# (list value)
6238disk_cachemodes = "{{ compute.get('disk_cachemodes', 'network=writeback,block=none') }}"
6239
6240#
6241# The path to an RNG (Random Number Generator) device that will be used as
6242# the source of entropy on the host. Since libvirt 1.3.4, any path (that
6243# returns random numbers when read) is accepted. The recommended source
6244# of entropy is ``/dev/urandom`` -- it is non-blocking, therefore
6245# relatively fast; and avoids the limitations of ``/dev/random``, which is
6246# a legacy interface. For more details (and comparison between different
6247# RNG sources), refer to the "Usage" section in the Linux kernel API
6248# documentation for ``[u]random``:
6249# http://man7.org/linux/man-pages/man4/urandom.4.html and
6250# http://man7.org/linux/man-pages/man7/random.7.html.
6251# (string value)
6252#rng_dev_path = /dev/urandom
6253{%- if compute.get('libvirt', {}).rng_dev_path is defined %}
6254rng_dev_path={{ compute.libvirt.rng_dev_path }}
6255{%- endif %}
6256
6257# For qemu or KVM guests, set this option to specify a default machine type per
6258# host architecture. You can find a list of supported machine types in your
6259# environment by checking the output of the "virsh capabilities" command. The
6260# format of the value for this config option is host-arch=machine-type. For
6261# example: x86_64=machinetype1,armv7l=machinetype2 (list value)
6262#hw_machine_type = <None>
6263
6264# The data source used to the populate the host "serial" UUID exposed to guest
6265# in the virtual BIOS. (string value)
6266# Possible values:
6267# none - <No description provided>
6268# os - <No description provided>
6269# hardware - <No description provided>
6270# auto - <No description provided>
6271#sysinfo_serial = auto
6272
6273# A number of seconds to memory usage statistics period. Zero or negative value
6274# mean to disable memory usage statistics. (integer value)
6275#mem_stats_period_seconds = 10
6276
6277# List of uid targets and ranges. Syntax is guest-uid:host-uid:count. Maximum of 5
6278# allowed. (list value)
6279#uid_maps =
6280
6281# List of gid targets and ranges. Syntax is guest-gid:host-gid:count. Maximum of 5
6282# allowed. (list value)
6283#gid_maps =
6284
6285# In a realtime host context vCPUs for guest will run in that scheduling
6286# priority. Priority depends on the host kernel (usually 1-99) (integer value)
6287#realtime_scheduler_priority = 1
6288
6289#
6290# This will allow you to specify a list of events to monitor low-level
6291# performance of guests, and collect related statistics via the libvirt
6292# driver, which in turn uses the Linux kernel's `perf` infrastructure.
6293# With this config attribute set, Nova will generate libvirt guest XML to
6294# monitor the specified events. For more information, refer to the
6295# "Performance monitoring events" section here:
6296# https://libvirt.org/formatdomain.html#elementsPerf. And here:
6297# https://libvirt.org/html/libvirt-libvirt-domain.html -- look for
6298# ``VIR_PERF_PARAM_*``
6299#
6300# For example, to monitor the count of CPU cycles (total/elapsed) and the
6301# count of cache misses, enable them as follows::
6302#
6303# [libvirt]
6304# enabled_perf_events = cpu_clock, cache_misses
6305#
6306# Possible values: A string list. The list of supported events can be
6307# found here: https://libvirt.org/formatdomain.html#elementsPerf.
6308#
6309# Note that support for Intel CMT events (`cmt`, `mbmbt`, `mbml`) is
6310# deprecated, and will be removed in the "Stein" release. That's because
6311# the upstream Linux kernel (from 4.14 onwards) has deleted support for
6312# Intel CMT, because it is broken by design.
6313# (list value)
6314#enabled_perf_events =
6315
6316#
6317# The number of PCIe ports an instance will get.
6318#
6319# Libvirt allows a custom number of PCIe ports (pcie-root-port controllers) a
6320# target instance will get. Some will be used by default, rest will be available
6321# for hotplug use.
6322#
6323# By default we have just 1-2 free ports which limits hotplug.
6324#
6325# More info: https://github.com/qemu/qemu/blob/master/docs/pcie.txt
6326#
6327# Due to QEMU limitations for aarch64/virt maximum value is set to '28'.
6328#
6329# Default value '0' moves calculating amount of ports to libvirt.
6330# (integer value)
6331# Minimum value: 0
6332# Maximum value: 28
6333#num_pcie_ports = 0
6334
6335#
6336# Available capacity in MiB for file-backed memory.
6337#
6338# Set to 0 to disable file-backed memory.
6339#
6340# When enabled, instances will create memory files in the directory specified
6341# in ``/etc/libvirt/qemu.conf``'s ``memory_backing_dir`` option. The default
6342# location is ``/var/lib/libvirt/qemu/ram``.
6343#
6344# When enabled, the value defined for this option is reported as the node memory
6345# capacity. Compute node system memory will be used as a cache for file-backed
6346# memory, via the kernel's pagecache mechanism.
6347#
6348# .. note::
6349# This feature is not compatible with hugepages.
6350#
6351# .. note::
6352# This feature is not compatible with memory overcommit.
6353#
6354# Related options:
6355#
6356# * ``virt_type`` must be set to ``kvm`` or ``qemu``.
6357# * ``ram_allocation_ratio`` must be set to 1.0.
6358# (integer value)
6359# Minimum value: 0
6360#file_backed_memory = 0
6361
6362#
6363# VM Images format.
6364#
6365# If default is specified, then use_cow_images flag is used instead of this
6366# one.
6367#
6368# Related options:
6369#
6370# * virt.use_cow_images
6371# * images_volume_group
6372# (string value)
6373# Possible values:
6374# raw - <No description provided>
6375# flat - <No description provided>
6376# qcow2 - <No description provided>
6377# lvm - <No description provided>
6378# rbd - <No description provided>
6379# ploop - <No description provided>
6380# default - <No description provided>
6381#images_type = default
6382
6383# DEPRECATED:
6384# Create sparse logical volumes (with virtualsize) if this flag is set to True.
6385# (boolean value)
6386# This option is deprecated for removal since 18.0.0.
6387# Its value may be silently ignored in the future.
6388# Reason:
6389# Sparse logical volumes is a feature that is not tested hence not supported.
6390# LVM logical volumes are preallocated by default. If you want thin
6391# provisioning,
6392# use Cinder thin-provisioned volumes.
6393#sparse_logical_volumes = false
6394
6395#
6396# Discard option for nova managed disks.
6397#
6398# Requires:
6399#
6400# * Libvirt >= 1.0.6
6401# * Qemu >= 1.5 (raw format)
6402# * Qemu >= 1.6 (qcow2 format)
6403# (string value)
6404# Possible values:
6405# ignore - <No description provided>
6406# unmap - <No description provided>
6407#hw_disk_discard = <None>
6408{%- if compute.libvirt.hw_disk_discard is defined %}
6409hw_disk_discard = {{ compute.libvirt.hw_disk_discard }}
6410{%- endif %}
6411
6412# DEPRECATED: Allows image information files to be stored in non-standard
6413# locations (string value)
6414# This option is deprecated for removal since 14.0.0.
6415# Its value may be silently ignored in the future.
6416# Reason: Image info files are no longer used by the image cache
6417#image_info_filename_pattern = $instances_path/$image_cache_subdirectory_name/%(image)s.info
6418
6419# Unused resized base images younger than this will not be removed (integer
6420# value)
6421#remove_unused_resized_minimum_age_seconds = 3600
6422
6423# DEPRECATED: Write a checksum for files in _base to disk (boolean value)
6424# This option is deprecated for removal since 14.0.0.
6425# Its value may be silently ignored in the future.
6426# Reason: The image cache no longer periodically calculates checksums of stored
6427# images. Data integrity can be checked at the block or filesystem level.
6428#checksum_base_images = false
6429
6430# DEPRECATED: How frequently to checksum base images (integer value)
6431# This option is deprecated for removal since 14.0.0.
6432# Its value may be silently ignored in the future.
6433# Reason: The image cache no longer periodically calculates checksums of stored
6434# images. Data integrity can be checked at the block or filesystem level.
6435#checksum_interval_seconds = 3600
6436
6437#
6438# Enable snapshot compression for ``qcow2`` images.
6439#
6440# Note: you can set ``snapshot_image_format`` to ``qcow2`` to force all
6441# snapshots to be in ``qcow2`` format, independently from their original image
6442# type.
6443#
6444# Related options:
6445#
6446# * snapshot_image_format
6447# (boolean value)
6448#snapshot_compression = false
6449
6450# Use virtio for bridge interfaces with KVM/QEMU (boolean value)
6451#use_virtio_for_bridges = true
6452
6453#
6454# Use multipath connection of the iSCSI or FC volume
6455#
6456# Volumes can be connected in the LibVirt as multipath devices. This will
6457# provide high availability and fault tolerance.
6458# (boolean value)
6459# Deprecated group/name - [libvirt]/iscsi_use_multipath
6460#volume_use_multipath = false
6461{%- if compute.libvirt.volume_use_multipath is defined %}
6462volume_use_multipath = {{ compute.libvirt.volume_use_multipath }}
6463{%- endif %}
6464
6465#
6466# Number of times to scan given storage protocol to find volume.
6467# (integer value)
6468# Deprecated group/name - [libvirt]/num_iscsi_scan_tries
6469#num_volume_scan_tries = 5
6470
6471#
6472# Number of times to rediscover AoE target to find volume.
6473#
6474# Nova provides support for block storage attaching to hosts via AOE (ATA over
6475# Ethernet). This option allows the user to specify the maximum number of retry
6476# attempts that can be made to discover the AoE device.
6477# (integer value)
6478#num_aoe_discover_tries = 3
6479
6480#
6481# The iSCSI transport iface to use to connect to target in case offload support
6482# is desired.
6483#
6484# Default format is of the form <transport_name>.<hwaddress> where
6485# <transport_name> is one of (be2iscsi, bnx2i, cxgb3i, cxgb4i, qla4xxx, ocs) and
6486# <hwaddress> is the MAC address of the interface and can be generated via the
6487# iscsiadm -m iface command. Do not confuse the iscsi_iface parameter to be
6488# provided here with the actual transport name.
6489# (string value)
6490# Deprecated group/name - [libvirt]/iscsi_transport
6491#iscsi_iface = <None>
6492
6493#
6494# Number of times to scan iSER target to find volume.
6495#
6496# iSER is a server network protocol that extends iSCSI protocol to use Remote
6497# Direct Memory Access (RDMA). This option allows the user to specify the
6498# maximum
6499# number of scan attempts that can be made to find iSER volume.
6500# (integer value)
6501#num_iser_scan_tries = 5
6502
6503#
6504# Use multipath connection of the iSER volume.
6505#
6506# iSER volumes can be connected as multipath devices. This will provide high
6507# availability and fault tolerance.
6508# (boolean value)
6509#iser_use_multipath = false
6510
6511#
6512# Directory where the NFS volume is mounted on the compute node.
6513# The default is 'mnt' directory of the location where nova's Python module
6514# is installed.
6515#
6516# NFS provides shared storage for the OpenStack Block Storage service.
6517#
6518# Possible values:
6519#
6520# * A string representing absolute path of mount point.
6521# (string value)
6522#nfs_mount_point_base = $state_path/mnt
6523
6524#
6525# Mount options passed to the NFS client. See section of the nfs man page
6526# for details.
6527#
6528# Mount options controls the way the filesystem is mounted and how the
6529# NFS client behaves when accessing files on this mount point.
6530#
6531# Possible values:
6532#
6533# * Any string representing mount options separated by commas.
6534# * Example string: vers=3,lookupcache=pos
6535# (string value)
6536#nfs_mount_options = <None>
6537{%- if compute.nfs_mount_options is defined %}
6538nfs_mount_options = "{{ compute.nfs_mount_options }}"
6539{%- endif %}
6540
6541#
6542# Directory where the Quobyte volume is mounted on the compute node.
6543#
6544# Nova supports Quobyte volume driver that enables storing Block Storage
6545# service volumes on a Quobyte storage back end. This Option specifies the
6546# path of the directory where Quobyte volume is mounted.
6547#
6548# Possible values:
6549#
6550# * A string representing absolute path of mount point.
6551# (string value)
6552#quobyte_mount_point_base = $state_path/mnt
6553
6554# Path to a Quobyte Client configuration file. (string value)
6555#quobyte_client_cfg = <None>
6556
6557#
6558# Directory where the SMBFS shares are mounted on the compute node.
6559# (string value)
6560#smbfs_mount_point_base = $state_path/mnt
6561
6562#
6563# Mount options passed to the SMBFS client.
6564#
6565# Provide SMBFS options as a single string containing all parameters.
6566# See mount.cifs man page for details. Note that the libvirt-qemu ``uid``
6567# and ``gid`` must be specified.
6568# (string value)
6569#smbfs_mount_options =
6570
6571#
6572# libvirt's transport method for remote file operations.
6573#
6574# Because libvirt cannot use RPC to copy files over network to/from other
6575# compute nodes, other method must be used for:
6576#
6577# * creating directory on remote host
6578# * creating file on remote host
6579# * removing file from remote host
6580# * copying file to remote host
6581# (string value)
6582# Possible values:
6583# ssh - <No description provided>
6584# rsync - <No description provided>
6585#remote_filesystem_transport = ssh
6586
6587#
6588# Directory where the Virtuozzo Storage clusters are mounted on the compute
6589# node.
6590#
6591# This option defines non-standard mountpoint for Vzstorage cluster.
6592#
6593# Related options:
6594#
6595# * vzstorage_mount_* group of parameters
6596# (string value)
6597#vzstorage_mount_point_base = $state_path/mnt
6598
6599#
6600# Mount owner user name.
6601#
6602# This option defines the owner user of Vzstorage cluster mountpoint.
6603#
6604# Related options:
6605#
6606# * vzstorage_mount_* group of parameters
6607# (string value)
6608#vzstorage_mount_user = stack
6609
6610#
6611# Mount owner group name.
6612#
6613# This option defines the owner group of Vzstorage cluster mountpoint.
6614#
6615# Related options:
6616#
6617# * vzstorage_mount_* group of parameters
6618# (string value)
6619#vzstorage_mount_group = qemu
6620
6621#
6622# Mount access mode.
6623#
6624# This option defines the access bits of Vzstorage cluster mountpoint,
6625# in the format similar to one of chmod(1) utility, like this: 0770.
6626# It consists of one to four digits ranging from 0 to 7, with missing
6627# lead digits assumed to be 0's.
6628#
6629# Related options:
6630#
6631# * vzstorage_mount_* group of parameters
6632# (string value)
6633#vzstorage_mount_perms = 0770
6634
6635#
6636# Path to vzstorage client log.
6637#
6638# This option defines the log of cluster operations,
6639# it should include "%(cluster_name)s" template to separate
6640# logs from multiple shares.
6641#
6642# Related options:
6643#
6644# * vzstorage_mount_opts may include more detailed logging options.
6645# (string value)
6646#vzstorage_log_path = /var/log/vstorage/%(cluster_name)s/nova.log.gz
6647
6648#
6649# Path to the SSD cache file.
6650#
6651# You can attach an SSD drive to a client and configure the drive to store
6652# a local cache of frequently accessed data. By having a local cache on a
6653# client's SSD drive, you can increase the overall cluster performance by
6654# up to 10 and more times.
6655# WARNING! There is a lot of SSD models which are not server grade and
6656# may lose an arbitrary set of data changes on power loss.
6657# Such SSDs should not be used in Vstorage and are dangerous as may lead
6658# to data corruptions and inconsistencies. Please consult with the manual
6659# on which SSD models are known to be safe or verify it using
6660# vstorage-hwflush-check(1) utility.
6661#
6662# This option defines the path which should include "%(cluster_name)s"
6663# template to separate caches from multiple shares.
6664#
6665# Related options:
6666#
6667# * vzstorage_mount_opts may include more detailed cache options.
6668# (string value)
6669#vzstorage_cache_path = <None>
6670
6671#
6672# Extra mount options for pstorage-mount
6673#
6674# For full description of them, see
6675# https://static.openvz.org/vz-man/man1/pstorage-mount.1.gz.html
6676# Format is a python string representation of arguments list, like:
6677# "['-v', '-R', '500']"
6678# Shouldn't include -c, -l, -C, -u, -g and -m as those have
6679# explicit vzstorage_* options.
6680#
6681# Related options:
6682#
6683# * All other vzstorage_* options
6684# (list value)
6685#vzstorage_mount_opts =
6686
6687#
6688# Configure virtio rx queue size.
6689#
6690# This option is only usable for virtio-net device with vhost and
6691# vhost-user backend. Available only with QEMU/KVM. Requires libvirt
6692# v2.3 QEMU v2.7. (integer value)
6693# Possible values:
6694# 256 - <No description provided>
6695# 512 - <No description provided>
6696# 1024 - <No description provided>
6697#rx_queue_size = <None>
6698
6699#
6700# Configure virtio tx queue size.
6701#
6702# This option is only usable for virtio-net device with vhost-user
6703# backend. Available only with QEMU/KVM. Requires libvirt v3.7 QEMU
6704# v2.10. (integer value)
6705# Possible values:
6706# 256 - <No description provided>
6707# 512 - <No description provided>
6708# 1024 - <No description provided>
6709#tx_queue_size = <None>
6710
6711#
6712# Number of times to rediscover NVMe target to find volume
6713#
6714# Nova provides support for block storage attaching to hosts via NVMe
6715# (Non-Volatile Memory Express). This option allows the user to specify the
6716# maximum number of retry attempts that can be made to discover the NVMe device.
6717# (integer value)
6718#num_nvme_discover_tries = 5
6719
6720
6721[matchmaker_redis]
6722
6723#
6724# From oslo.messaging
6725#
6726
6727# DEPRECATED: Host to locate redis. (string value)
6728# This option is deprecated for removal.
6729# Its value may be silently ignored in the future.
6730# Reason: Replaced by [DEFAULT]/transport_url
6731#host = 127.0.0.1
6732
6733# DEPRECATED: Use this port to connect to redis host. (port value)
6734# Minimum value: 0
6735# Maximum value: 65535
6736# This option is deprecated for removal.
6737# Its value may be silently ignored in the future.
6738# Reason: Replaced by [DEFAULT]/transport_url
6739#port = 6379
6740
6741# DEPRECATED: Password for Redis server (optional). (string value)
6742# This option is deprecated for removal.
6743# Its value may be silently ignored in the future.
6744# Reason: Replaced by [DEFAULT]/transport_url
6745#password =
6746
6747# DEPRECATED: List of Redis Sentinel hosts (fault tolerance mode), e.g.,
6748# [host:port, host1:port ... ] (list value)
6749# This option is deprecated for removal.
6750# Its value may be silently ignored in the future.
6751# Reason: Replaced by [DEFAULT]/transport_url
6752#sentinel_hosts =
6753
6754# Redis replica set name. (string value)
6755#sentinel_group_name = oslo-messaging-zeromq
6756
6757# Time in ms to wait between connection attempts. (integer value)
6758#wait_timeout = 2000
6759
6760# Time in ms to wait before the transaction is killed. (integer value)
6761#check_timeout = 20000
6762
6763# Timeout in ms on blocking socket operations. (integer value)
6764#socket_timeout = 10000
6765
6766
6767[metrics]
6768#
6769# Configuration options for metrics
6770#
6771# Options under this group allow to adjust how values assigned to metrics are
6772# calculated.
6773
6774#
6775# From nova.conf
6776#
6777
6778#
6779# When using metrics to weight the suitability of a host, you can use this
6780# option
6781# to change how the calculated weight influences the weight assigned to a host
6782# as
6783# follows:
6784#
6785# * >1.0: increases the effect of the metric on overall weight
6786# * 1.0: no change to the calculated weight
6787# * >0.0,<1.0: reduces the effect of the metric on overall weight
6788# * 0.0: the metric value is ignored, and the value of the
6789# 'weight_of_unavailable' option is returned instead
6790# * >-1.0,<0.0: the effect is reduced and reversed
6791# * -1.0: the effect is reversed
6792# * <-1.0: the effect is increased proportionally and reversed
6793#
6794# This option is only used by the FilterScheduler and its subclasses; if you use
6795# a different scheduler, this option has no effect.
6796#
6797# Possible values:
6798#
6799# * An integer or float value, where the value corresponds to the multiplier
6800# ratio for this weigher.
6801#
6802# Related options:
6803#
6804# * weight_of_unavailable
6805# (floating point value)
6806#weight_multiplier = 1.0
6807
6808#
6809# This setting specifies the metrics to be weighed and the relative ratios for
6810# each metric. This should be a single string value, consisting of a series of
6811# one or more 'name=ratio' pairs, separated by commas, where 'name' is the name
6812# of the metric to be weighed, and 'ratio' is the relative weight for that
6813# metric.
6814#
6815# Note that if the ratio is set to 0, the metric value is ignored, and instead
6816# the weight will be set to the value of the 'weight_of_unavailable' option.
6817#
6818# As an example, let's consider the case where this option is set to:
6819#
6820# ``name1=1.0, name2=-1.3``
6821#
6822# The final weight will be:
6823#
6824# ``(name1.value * 1.0) + (name2.value * -1.3)``
6825#
6826# This option is only used by the FilterScheduler and its subclasses; if you use
6827# a different scheduler, this option has no effect.
6828#
6829# Possible values:
6830#
6831# * A list of zero or more key/value pairs separated by commas, where the key is
6832# a string representing the name of a metric and the value is a numeric weight
6833# for that metric. If any value is set to 0, the value is ignored and the
6834# weight will be set to the value of the 'weight_of_unavailable' option.
6835#
6836# Related options:
6837#
6838# * weight_of_unavailable
6839# (list value)
6840#weight_setting =
6841
6842#
6843# This setting determines how any unavailable metrics are treated. If this
6844# option
6845# is set to True, any hosts for which a metric is unavailable will raise an
6846# exception, so it is recommended to also use the MetricFilter to filter out
6847# those hosts before weighing.
6848#
6849# This option is only used by the FilterScheduler and its subclasses; if you use
6850# a different scheduler, this option has no effect.
6851#
6852# Possible values:
6853#
6854# * True or False, where False ensures any metric being unavailable for a host
6855# will set the host weight to 'weight_of_unavailable'.
6856#
6857# Related options:
6858#
6859# * weight_of_unavailable
6860# (boolean value)
6861#required = true
6862
6863#
6864# When any of the following conditions are met, this value will be used in place
6865# of any actual metric value:
6866#
6867# * One of the metrics named in 'weight_setting' is not available for a host,
6868# and the value of 'required' is False
6869# * The ratio specified for a metric in 'weight_setting' is 0
6870# * The 'weight_multiplier' option is set to 0
6871#
6872# This option is only used by the FilterScheduler and its subclasses; if you use
6873# a different scheduler, this option has no effect.
6874#
6875# Possible values:
6876#
6877# * An integer or float value, where the value corresponds to the multiplier
6878# ratio for this weigher.
6879#
6880# Related options:
6881#
6882# * weight_setting
6883# * required
6884# * weight_multiplier
6885# (floating point value)
6886#weight_of_unavailable = -10000.0
6887
6888
6889[mks]
6890#
6891# Nova compute node uses WebMKS, a desktop sharing protocol to provide
6892# instance console access to VM's created by VMware hypervisors.
6893#
6894# Related options:
6895# Following options must be set to provide console access.
6896# * mksproxy_base_url
6897# * enabled
6898
6899#
6900# From nova.conf
6901#
6902
6903#
6904# Location of MKS web console proxy
6905#
6906# The URL in the response points to a WebMKS proxy which
6907# starts proxying between client and corresponding vCenter
6908# server where instance runs. In order to use the web based
6909# console access, WebMKS proxy should be installed and configured
6910#
6911# Possible values:
6912#
6913# * Must be a valid URL of the form:``http://host:port/`` or
6914# ``https://host:port/``
6915# (uri value)
6916#mksproxy_base_url = http://127.0.0.1:6090/
6917
6918#
6919# Enables graphical console access for virtual machines.
6920# (boolean value)
6921#enabled = false
6922
6923
6924[neutron]
6925#
6926# Configuration options for neutron (network connectivity as a service).
6927
6928#
6929# From nova.conf
6930#
6931
6932# DEPRECATED:
6933# This option specifies the URL for connecting to Neutron.
6934#
6935# Possible values:
6936#
6937# * Any valid URL that points to the Neutron API service is appropriate here.
6938# This typically matches the URL returned for the 'network' service type
6939# from the Keystone service catalog.
6940# (uri value)
6941# This option is deprecated for removal since 17.0.0.
6942# Its value may be silently ignored in the future.
6943# Reason: Endpoint lookup uses the service catalog via common keystoneauth1
6944# Adapter configuration options. In the current release, "url" will override
6945# this behavior, but will be ignored and/or removed in a future release. To
6946# achieve the same result, use the endpoint_override option instead.
6947#url = http://127.0.0.1:9696
6948
6949#
6950# Default name for the Open vSwitch integration bridge.
6951#
6952# Specifies the name of an integration bridge interface used by OpenvSwitch.
6953# This option is only used if Neutron does not specify the OVS bridge name in
6954# port binding responses.
6955# (string value)
6956#ovs_bridge = br-int
6957
6958#
6959# Default name for the floating IP pool.
6960#
6961# Specifies the name of floating IP pool used for allocating floating IPs. This
6962# option is only used if Neutron does not specify the floating IP pool name in
6963# port binding responses.
6964# (string value)
6965#default_floating_pool = nova
6966
6967#
6968# Integer value representing the number of seconds to wait before querying
6969# Neutron for extensions. After this number of seconds the next time Nova
6970# needs to create a resource in Neutron it will requery Neutron for the
6971# extensions that it has loaded. Setting value to 0 will refresh the
6972# extensions with no wait.
6973# (integer value)
6974# Minimum value: 0
6975extension_sync_interval = {{ compute.network.get('extension_sync_interval', '600') }}
6976
6977#
6978# List of physnets present on this host.
6979#
6980# For each *physnet* listed, an additional section,
6981# ``[neutron_physnet_$PHYSNET]``, will be added to the configuration file. Each
6982# section must be configured with a single configuration option, ``numa_nodes``,
6983# which should be a list of node IDs for all NUMA nodes this physnet is
6984# associated with. For example::
6985#
6986# [neutron]
6987# physnets = foo, bar
6988#
6989# [neutron_physnet_foo]
6990# numa_nodes = 0
6991#
6992# [neutron_physnet_bar]
6993# numa_nodes = 0,1
6994#
6995# Any *physnet* that is not listed using this option will be treated as having
6996# no
6997# particular NUMA node affinity.
6998#
6999# Tunnelled networks (VXLAN, GRE, ...) cannot be accounted for in this way and
7000# are instead configured using the ``[neutron_tunnel]`` group. For example::
7001#
7002# [neutron_tunnel]
7003# numa_nodes = 1
7004#
7005# Related options:
7006#
7007# * ``[neutron_tunnel] numa_nodes`` can be used to configure NUMA affinity for
7008# all tunneled networks
7009# * ``[neutron_physnet_$PHYSNET] numa_nodes`` must be configured for each value
7010# of ``$PHYSNET`` specified by this option
7011# (list value)
7012#physnets =
7013
7014#
7015# When set to True, this option indicates that Neutron will be used to proxy
7016# metadata requests and resolve instance ids. Otherwise, the instance ID must be
7017# passed to the metadata request in the 'X-Instance-ID' header.
7018#
7019# Related options:
7020#
7021# * metadata_proxy_shared_secret
7022# (boolean value)
7023#service_metadata_proxy = false
7024
7025#
7026# This option holds the shared secret string used to validate proxy requests to
7027# Neutron metadata requests. In order to be used, the
7028# 'X-Metadata-Provider-Signature' header must be supplied in the request.
7029#
7030# Related options:
7031#
7032# * service_metadata_proxy
7033# (string value)
7034#metadata_proxy_shared_secret =
7035
7036# PEM encoded Certificate Authority to use when verifying HTTPs connections.
7037# (string value)
7038#cafile = <None>
7039{%- if compute.network.get('protocol', 'http') == 'https' %}
7040cafile = {{ compute.network.get('cacert_file', compute.cacert_file) }}
7041{%- endif %}
7042
7043# PEM encoded client certificate cert file (string value)
7044#certfile = <None>
7045
7046# PEM encoded client certificate key file (string value)
7047#keyfile = <None>
7048
7049# Verify HTTPS connections. (boolean value)
7050#insecure = false
7051
7052# Timeout value for http requests (integer value)
7053timeout = 300
7054
7055# Collect per-API call timing information. (boolean value)
7056#collect_timing = false
7057
7058# Log requests to multiple loggers. (boolean value)
7059#split_loggers = false
7060
7061# Authentication type to load (string value)
7062# Deprecated group/name - [neutron]/auth_plugin
7063auth_type = v3password
7064
7065# Config Section from which to load plugin specific options (string value)
7066#auth_section = <None>
7067
7068# Authentication URL (string value)
7069auth_url = {{ compute.identity.get('protocol', 'http') }}://{{ compute.identity.host }}:{{ compute.identity.port }}/v3
7070
7071# Scope for system operations (string value)
7072#system_scope = <None>
7073
7074# Domain ID to scope to (string value)
7075#domain_id = <None>
7076
7077# Domain name to scope to (string value)
7078#domain_name = <None>
7079
7080# Project ID to scope to (string value)
7081#project_id = <None>
7082
7083# Project name to scope to (string value)
7084project_name = {{ compute.identity.tenant }}
7085
7086# Domain ID containing project (string value)
7087#project_domain_id = <None>
7088
7089# Domain name containing project (string value)
7090project_domain_name = {{ compute.get('project_domain_name', 'Default') }}
7091
7092# Trust ID (string value)
7093#trust_id = <None>
7094
7095# Optional domain ID to use with v3 and v2 parameters. It will be used for both
7096# the user and project domain in v3 and ignored in v2 authentication. (string
7097# value)
7098#default_domain_id = <None>
7099
7100# Optional domain name to use with v3 API and v2 parameters. It will be used for
7101# both the user and project domain in v3 and ignored in v2 authentication.
7102# (string value)
7103#default_domain_name = <None>
7104
7105# User ID (string value)
7106#user_id = <None>
7107
7108# Username (string value)
7109# Deprecated group/name - [neutron]/user_name
7110username = {{ compute.network.user }}
7111
7112# User's domain id (string value)
7113#user_domain_id = <None>
7114
7115# User's domain name (string value)
7116user_domain_name = {{ compute.get('user_domain_name', 'Default') }}
7117
7118# User's password (string value)
7119password = {{ compute.network.password }}
7120
7121# Tenant ID (string value)
7122#tenant_id = <None>
7123
7124# Tenant Name (string value)
7125#tenant_name = <None>
7126
7127# The default service_type for endpoint URL discovery. (string value)
7128#service_type = network
7129
7130# The default service_name for endpoint URL discovery. (string value)
7131#service_name = <None>
7132
7133# List of interfaces, in order of preference, for endpoint URL. (list value)
7134#valid_interfaces = internal,public
7135
7136# The default region_name for endpoint URL discovery. (string value)
7137region_name = {{ compute.network.region }}
7138
7139# Always use this endpoint URL for requests for this client. NOTE: The
7140# unversioned endpoint should be specified here; to request a particular API
7141# version, use the `version`, `min-version`, and/or `max-version` options.
7142# (string value)
7143#endpoint_override = <None>
7144
7145
7146[notifications]
7147#
7148# Most of the actions in Nova which manipulate the system state generate
7149# notifications which are posted to the messaging component (e.g. RabbitMQ) and
7150# can be consumed by any service outside the OpenStack. More technical details
7151# at https://docs.openstack.org/nova/latest/reference/notifications.html
7152
7153#
7154# From nova.conf
7155#
7156
7157#
7158# If set, send compute.instance.update notifications on
7159# instance state changes.
7160#
7161# Please refer to
7162# https://docs.openstack.org/nova/latest/reference/notifications.html for
7163# additional information on notifications.
7164#
7165# Possible values:
7166#
7167# * None - no notifications
7168# * "vm_state" - notifications are sent with VM state transition information in
7169# the ``old_state`` and ``state`` fields. The ``old_task_state`` and
7170# ``new_task_state`` fields will be set to the current task_state of the
7171# instance.
7172# * "vm_and_task_state" - notifications are sent with VM and task state
7173# transition information.
7174# (string value)
7175# Possible values:
7176# <None> - <No description provided>
7177# vm_state - <No description provided>
7178# vm_and_task_state - <No description provided>
7179#notify_on_state_change = <None>
7180{%- if compute.get('notification', {}).notify_on is defined %}
7181{%- for key, value in compute.notification.notify_on.items() %}
7182notify_on_{{ key }} = {{ value }}
7183{%- endfor %}
7184{%- elif pillar.ceilometer is defined %}
7185notify_on_state_change = vm_and_task_state
7186{%- endif %}
7187
7188# Default notification level for outgoing notifications. (string value)
7189# Possible values:
7190# DEBUG - <No description provided>
7191# INFO - <No description provided>
7192# WARN - <No description provided>
7193# ERROR - <No description provided>
7194# CRITICAL - <No description provided>
7195# Deprecated group/name - [DEFAULT]/default_notification_level
7196#default_level = INFO
7197
7198#
7199# Specifies which notification format shall be used by nova.
7200#
7201# The default value is fine for most deployments and rarely needs to be changed.
7202# This value can be set to 'versioned' once the infrastructure moves closer to
7203# consuming the newer format of notifications. After this occurs, this option
7204# will be removed.
7205#
7206# Note that notifications can be completely disabled by setting ``driver=noop``
7207# in the ``[oslo_messaging_notifications]`` group.
7208#
7209# Possible values:
7210#
7211# * unversioned: Only the legacy unversioned notifications are emitted.
7212# * versioned: Only the new versioned notifications are emitted.
7213# * both: Both the legacy unversioned and the new versioned notifications are
7214# emitted. (Default)
7215#
7216# The list of versioned notifications is visible in
7217# https://docs.openstack.org/nova/latest/reference/notifications.html
7218# (string value)
7219# Possible values:
7220# unversioned - <No description provided>
7221# versioned - <No description provided>
7222# both - <No description provided>
7223#notification_format = both
7224
7225#
7226# Specifies the topics for the versioned notifications issued by nova.
7227#
7228# The default value is fine for most deployments and rarely needs to be changed.
7229# However, if you have a third-party service that consumes versioned
7230# notifications, it might be worth getting a topic for that service.
7231# Nova will send a message containing a versioned notification payload to each
7232# topic queue in this list.
7233#
7234# The list of versioned notifications is visible in
7235# https://docs.openstack.org/nova/latest/reference/notifications.html
7236# (list value)
7237#versioned_notifications_topics = versioned_notifications
7238
7239#
7240# If enabled, include block device information in the versioned notification
7241# payload. Sending block device information is disabled by default as providing
7242# that information can incur some overhead on the system since the information
7243# may need to be loaded from the database.
7244# (boolean value)
7245#bdms_in_notifications = false
7246
7247
7248[osapi_v21]
7249
7250#
7251# From nova.conf
7252#
7253
7254# DEPRECATED:
7255# This option is a string representing a regular expression (regex) that matches
7256# the project_id as contained in URLs. If not set, it will match normal UUIDs
7257# created by keystone.
7258#
7259# Possible values:
7260#
7261# * A string representing any legal regular expression
7262# (string value)
7263# This option is deprecated for removal since 13.0.0.
7264# Its value may be silently ignored in the future.
7265# Reason:
7266# Recent versions of nova constrain project IDs to hexadecimal characters and
7267# dashes. If your installation uses IDs outside of this range, you should use
7268# this option to provide your own regex and give you time to migrate offending
7269# projects to valid IDs before the next release.
7270#project_id_regex = <None>
7271
7272
7273[oslo_concurrency]
7274
7275#
7276# From oslo.concurrency
7277#
7278
7279# Enables or disables inter-process locks. (boolean value)
7280#disable_process_locking = false
7281
7282# Directory to use for lock files. For security, the specified directory should
7283# only be writable by the user running the processes that need locking. Defaults
7284# to environment variable OSLO_LOCK_PATH. If external locks are used, a lock
7285# path must be set. (string value)
7286#lock_path = <None>
7287
7288
7289[oslo_messaging_notifications]
7290{%- set _data = compute.notification %}
7291{%- include "oslo_templates/files/" ~ compute.version ~ "/oslo/messaging/_notifications.conf" %}
7292
7293{%- if compute.message_queue is defined %}
7294{%- set _data = compute.message_queue %}
7295{%- if _data.engine == 'rabbitmq' %}
7296 {%- set messaging_engine = 'rabbit' %}
7297{%- else %}
7298 {%- set messaging_engine = _data.engine %}
7299{%- endif %}
7300[oslo_messaging_{{ messaging_engine }}]
7301{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': compute.cacert_file}) %}{% endif %}
7302{%- include "oslo_templates/files/" ~ compute.version ~ "/oslo/messaging/_" ~ messaging_engine ~ ".conf" %}
7303{%- endif %}
7304
7305
7306[oslo_middleware]
7307{%- set _data = compute %}
7308{%- include "oslo_templates/files/" ~ compute.version ~ "/oslo/_middleware.conf" %}
7309
7310
7311[oslo_policy]
7312{%- if compute.policy is defined %}
7313{%- set _data = compute.policy %}
7314{%- include "oslo_templates/files/" ~ compute.version ~ "/oslo/_policy.conf" %}
7315{%- endif %}
7316
7317
7318[pci]
7319
7320#
7321# From nova.conf
7322#
7323
7324#
7325# An alias for a PCI passthrough device requirement.
7326#
7327# This allows users to specify the alias in the extra specs for a flavor,
7328# without
7329# needing to repeat all the PCI property requirements.
7330#
7331# Possible Values:
7332#
7333# * A list of JSON values which describe the aliases. For example::
7334#
7335# alias = {
7336# "name": "QuickAssist",
7337# "product_id": "0443",
7338# "vendor_id": "8086",
7339# "device_type": "type-PCI",
7340# "numa_policy": "required"
7341# }
7342#
7343# This defines an alias for the Intel QuickAssist card. (multi valued). Valid
7344# key values are :
7345#
7346# ``name``
7347# Name of the PCI alias.
7348#
7349# ``product_id``
7350# Product ID of the device in hexadecimal.
7351#
7352# ``vendor_id``
7353# Vendor ID of the device in hexadecimal.
7354#
7355# ``device_type``
7356# Type of PCI device. Valid values are: ``type-PCI``, ``type-PF`` and
7357# ``type-VF``.
7358#
7359# ``numa_policy``
7360# Required NUMA affinity of device. Valid values are: ``legacy``,
7361# ``preferred`` and ``required``.
7362# (multi valued)
7363# Deprecated group/name - [DEFAULT]/pci_alias
7364#alias =
7365{%- if compute.get('pci', {}).get('alias', false) %}
7366 {%- for alias_name, alias in compute.pci.alias.items() %}
7367alias = {{ alias|json }}
7368 {%- endfor %}
7369{%- endif %}
7370
7371#
7372# White list of PCI devices available to VMs.
7373#
7374# Possible values:
7375#
7376# * A JSON dictionary which describe a whitelisted PCI device. It should take
7377# the following format::
7378#
7379# ["vendor_id": "<id>",] ["product_id": "<id>",]
7380# ["address": "[[[[<domain>]:]<bus>]:][<slot>][.[<function>]]" |
7381# "devname": "<name>",]
7382# {"<tag>": "<tag_value>",}
7383#
7384# Where ``[`` indicates zero or one occurrences, ``{`` indicates zero or
7385# multiple occurrences, and ``|`` mutually exclusive options. Note that any
7386# missing fields are automatically wildcarded.
7387#
7388# Valid key values are :
7389#
7390# ``vendor_id``
7391# Vendor ID of the device in hexadecimal.
7392#
7393# ``product_id``
7394# Product ID of the device in hexadecimal.
7395#
7396# ``address``
7397# PCI address of the device. Both traditional glob style and regular
7398# expression syntax is supported.
7399#
7400# ``devname``
7401# Device name of the device (for e.g. interface name). Not all PCI devices
7402# have a name.
7403#
7404# ``<tag>``
7405# Additional ``<tag>`` and ``<tag_value>`` used for matching PCI devices.
7406# Supported ``<tag>`` values are :
7407#
7408# - ``physical_network``
7409# - ``trusted``
7410#
7411# Valid examples are::
7412#
7413# passthrough_whitelist = {"devname":"eth0",
7414# "physical_network":"physnet"}
7415# passthrough_whitelist = {"address":"*:0a:00.*"}
7416# passthrough_whitelist = {"address":":0a:00.",
7417# "physical_network":"physnet1"}
7418# passthrough_whitelist = {"vendor_id":"1137",
7419# "product_id":"0071"}
7420# passthrough_whitelist = {"vendor_id":"1137",
7421# "product_id":"0071",
7422# "address": "0000:0a:00.1",
7423# "physical_network":"physnet1"}
7424# passthrough_whitelist = {"address":{"domain": ".*",
7425# "bus": "02", "slot": "01",
7426# "function": "[2-7]"},
7427# "physical_network":"physnet1"}
7428# passthrough_whitelist = {"address":{"domain": ".*",
7429# "bus": "02", "slot": "0[1-2]",
7430# "function": ".*"},
7431# "physical_network":"physnet1"}
7432# passthrough_whitelist = {"devname": "eth0", "physical_network":"physnet1",
7433# "trusted": "true"}
7434#
7435# The following are invalid, as they specify mutually exclusive options::
7436#
7437# passthrough_whitelist = {"devname":"eth0",
7438# "physical_network":"physnet",
7439# "address":"*:0a:00.*"}
7440#
7441# * A JSON list of JSON dictionaries corresponding to the above format. For
7442# example::
7443#
7444# passthrough_whitelist = [{"product_id":"0001", "vendor_id":"8086"},
7445# {"product_id":"0002", "vendor_id":"8086"}]
7446# (multi valued)
7447# Deprecated group/name - [DEFAULT]/pci_passthrough_whitelist
7448#passthrough_whitelist =
7449{%- if compute.get('sriov', false) %}
7450 {%- for nic_name, sriov in compute.sriov.items() %}
7451passthrough_whitelist = {{ sriov|json }}
7452 {%- endfor %}
7453{%- endif %}
7454
Oleksandr Pidrepnyi14f08272019-02-20 12:48:17 +02007455{%- if compute.get('pci', {}).get('passthrough_whitelist', false) %}
7456 {%- for item in compute.pci.passthrough_whitelist %}
7457passthrough_whitelist = {{ item | json }}
7458 {%- endfor %}
7459{%- endif %}
7460
Michael Polenchukf37e5b62018-11-28 17:55:45 +04007461
7462[placement]
7463
7464#
7465# From nova.conf
7466#
7467
7468#
7469# If True, when limiting allocation candidate results, the results will be
7470# a random sampling of the full result set. If False, allocation candidates
7471# are returned in a deterministic but undefined order. That is, all things
7472# being equal, two requests for allocation candidates will return the same
7473# results in the same order; but no guarantees are made as to how that order
7474# is determined.
7475# (boolean value)
7476#randomize_allocation_candidates = false
7477
7478# The file that defines placement policies. This can be an absolute path or
7479# relative to the configuration file. (string value)
7480#policy_file = placement-policy.yaml
7481
7482#
7483# Early API microversions (<1.8) allowed creating allocations and not specifying
7484# a project or user identifier for the consumer. In cleaning up the data
7485# modeling, we no longer allow missing project and user information. If an older
7486# client makes an allocation, we'll use this in place of the information it
7487# doesn't provide.
7488# (string value)
7489#incomplete_consumer_project_id = 00000000-0000-0000-0000-000000000000
7490
7491#
7492# Early API microversions (<1.8) allowed creating allocations and not specifying
7493# a project or user identifier for the consumer. In cleaning up the data
7494# modeling, we no longer allow missing project and user information. If an older
7495# client makes an allocation, we'll use this in place of the information it
7496# doesn't provide.
7497# (string value)
7498#incomplete_consumer_user_id = 00000000-0000-0000-0000-000000000000
7499
7500# PEM encoded Certificate Authority to use when verifying HTTPs connections.
7501# (string value)
7502#cafile = <None>
7503{%- if compute.identity.get('protocol', 'http') == 'https' %}
7504cafile = {{ compute.identity.get('cacert_file', compute.cacert_file) }}
7505{%- endif %}
7506
7507# PEM encoded client certificate cert file (string value)
7508#certfile = <None>
7509
7510# PEM encoded client certificate key file (string value)
7511#keyfile = <None>
7512
7513# Verify HTTPS connections. (boolean value)
7514#insecure = false
7515
7516# Timeout value for http requests (integer value)
7517#timeout = <None>
7518
7519# Collect per-API call timing information. (boolean value)
7520#collect_timing = false
7521
7522# Log requests to multiple loggers. (boolean value)
7523#split_loggers = false
7524
7525# Authentication type to load (string value)
7526# Deprecated group/name - [placement]/auth_plugin
7527auth_type = password
7528
7529# Config Section from which to load plugin specific options (string value)
7530#auth_section = <None>
7531
7532# Authentication URL (string value)
7533auth_url = {{ compute.identity.get('protocol', 'http') }}://{{ compute.identity.host }}:35357/v3
7534
7535# Scope for system operations (string value)
7536#system_scope = <None>
7537
7538# Domain ID to scope to (string value)
7539#domain_id = <None>
7540
7541# Domain name to scope to (string value)
7542#domain_name = <None>
7543
7544# Project ID to scope to (string value)
7545#project_id = <None>
7546
7547# Project name to scope to (string value)
7548project_name = {{ compute.identity.tenant }}
7549
7550# Domain ID containing project (string value)
7551project_domain_id = {{ compute.identity.get('domain', 'default') }}
7552
7553# Domain name containing project (string value)
7554#project_domain_name = <None>
7555
7556# Trust ID (string value)
7557#trust_id = <None>
7558
7559# Optional domain ID to use with v3 and v2 parameters. It will be used for both
7560# the user and project domain in v3 and ignored in v2 authentication. (string
7561# value)
7562#default_domain_id = <None>
7563
7564# Optional domain name to use with v3 API and v2 parameters. It will be used for
7565# both the user and project domain in v3 and ignored in v2 authentication.
7566# (string value)
7567#default_domain_name = <None>
7568
7569# User ID (string value)
7570#user_id = <None>
7571
7572# Username (string value)
7573# Deprecated group/name - [placement]/user_name
7574username = {{ compute.identity.user }}
7575
7576# User's domain id (string value)
7577user_domain_id = {{ compute.identity.get('domain', 'default') }}
7578
7579# User's domain name (string value)
7580#user_domain_name = <None>
7581
7582# User's password (string value)
7583password = {{ compute.identity.password }}
7584
7585# Tenant ID (string value)
7586#tenant_id = <None>
7587
7588# Tenant Name (string value)
7589#tenant_name = <None>
7590
7591# The default service_type for endpoint URL discovery. (string value)
7592#service_type = placement
7593
7594# The default service_name for endpoint URL discovery. (string value)
7595#service_name = <None>
7596
7597# List of interfaces, in order of preference, for endpoint URL. (list value)
7598valid_interfaces = internal
7599
7600# The default region_name for endpoint URL discovery. (string value)
7601region_name = {{ compute.identity.region }}
7602
7603# Always use this endpoint URL for requests for this client. NOTE: The
7604# unversioned endpoint should be specified here; to request a particular API
7605# version, use the `version`, `min-version`, and/or `max-version` options.
7606# (string value)
7607#endpoint_override = <None>
7608
7609
7610[placement_database]
7611#
7612# The *Placement API Database* is a separate database which can be used with the
7613# placement service. This database is optional: if the connection option is not
7614# set, the nova api database will be used instead.
7615
7616#
7617# From nova.conf
7618#
7619
7620# The SQLAlchemy connection string to use to connect to the database. (string
7621# value)
7622#connection = <None>
7623
7624# Optional URL parameters to append onto the connection URL at connect time;
7625# specify as param1=value1&param2=value2&... (string value)
7626#connection_parameters =
7627
7628# If True, SQLite uses synchronous mode. (boolean value)
7629#sqlite_synchronous = true
7630
7631# The SQLAlchemy connection string to use to connect to the slave database.
7632# (string value)
7633#slave_connection = <None>
7634
7635# The SQL mode to be used for MySQL sessions. This option, including the
7636# default, overrides any server-set SQL mode. To use whatever SQL mode is set by
7637# the server configuration, set this to no value. Example: mysql_sql_mode=
7638# (string value)
7639#mysql_sql_mode = TRADITIONAL
7640
7641# Connections which have been present in the connection pool longer than this
7642# number of seconds will be replaced with a new one the next time they are
7643# checked out from the pool. (integer value)
7644#connection_recycle_time = 3600
7645
7646# Maximum number of SQL connections to keep open in a pool. Setting a value of 0
7647# indicates no limit. (integer value)
7648#max_pool_size = <None>
7649
7650# Maximum number of database connection retries during startup. Set to -1 to
7651# specify an infinite retry count. (integer value)
7652#max_retries = 10
7653
7654# Interval between retries of opening a SQL connection. (integer value)
7655#retry_interval = 10
7656
7657# If set, use this value for max_overflow with SQLAlchemy. (integer value)
7658#max_overflow = <None>
7659
7660# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
7661# value)
7662#connection_debug = 0
7663
7664# Add Python stack traces to SQL as comment strings. (boolean value)
7665#connection_trace = false
7666
7667# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
7668#pool_timeout = <None>
7669
7670
7671[powervm]
7672#
7673# PowerVM options allow cloud administrators to configure how OpenStack will
7674# work
7675# with the PowerVM hypervisor.
7676
7677#
7678# From nova.conf
7679#
7680
7681#
7682# Factor used to calculate the amount of physical processor compute power given
7683# to each vCPU. E.g. A value of 1.0 means a whole physical processor, whereas
7684# 0.05 means 1/20th of a physical processor.
7685# (floating point value)
7686# Minimum value: 0
7687# Maximum value: 1
7688#proc_units_factor = 0.1
7689
7690#
7691# The disk driver to use for PowerVM disks. PowerVM provides support for
7692# localdisk and PowerVM Shared Storage Pool disk drivers.
7693#
7694# Related options:
7695#
7696# * volume_group_name - required when using localdisk
7697#
7698# (string value)
7699# Possible values:
7700# localdisk - <No description provided>
7701# ssp - <No description provided>
7702#disk_driver = localdisk
7703
7704#
7705# Volume Group to use for block device operations. If disk_driver is localdisk,
7706# then this attribute must be specified. It is strongly recommended NOT to use
7707# rootvg since that is used by the management partition and filling it will
7708# cause
7709# failures.
7710# (string value)
7711#volume_group_name =
7712
7713
7714[profiler]
7715
7716#
7717# From osprofiler
7718#
7719
7720#
7721# Enable the profiling for all services on this node.
7722#
7723# Default value is False (fully disable the profiling feature).
7724#
7725# Possible values:
7726#
7727# * True: Enables the feature
7728# * False: Disables the feature. The profiling cannot be started via this
7729# project
7730# operations. If the profiling is triggered by another project, this project
7731# part will be empty.
7732# (boolean value)
7733# Deprecated group/name - [profiler]/profiler_enabled
7734#enabled = false
7735
7736#
7737# Enable SQL requests profiling in services.
7738#
7739# Default value is False (SQL requests won't be traced).
7740#
7741# Possible values:
7742#
7743# * True: Enables SQL requests profiling. Each SQL query will be part of the
7744# trace and can the be analyzed by how much time was spent for that.
7745# * False: Disables SQL requests profiling. The spent time is only shown on a
7746# higher level of operations. Single SQL queries cannot be analyzed this way.
7747# (boolean value)
7748#trace_sqlalchemy = false
7749
7750#
7751# Secret key(s) to use for encrypting context data for performance profiling.
7752#
7753# This string value should have the following format: <key1>[,<key2>,...<keyn>],
7754# where each key is some random string. A user who triggers the profiling via
7755# the REST API has to set one of these keys in the headers of the REST API call
7756# to include profiling results of this node for this particular project.
7757#
7758# Both "enabled" flag and "hmac_keys" config options should be set to enable
7759# profiling. Also, to generate correct profiling information across all services
7760# at least one key needs to be consistent between OpenStack projects. This
7761# ensures it can be used from client side to generate the trace, containing
7762# information from all possible resources.
7763# (string value)
7764#hmac_keys = SECRET_KEY
7765
7766#
7767# Connection string for a notifier backend.
7768#
7769# Default value is ``messaging://`` which sets the notifier to oslo_messaging.
7770#
7771# Examples of possible values:
7772#
7773# * ``messaging://`` - use oslo_messaging driver for sending spans.
7774# * ``redis://127.0.0.1:6379`` - use redis driver for sending spans.
7775# * ``mongodb://127.0.0.1:27017`` - use mongodb driver for sending spans.
7776# * ``elasticsearch://127.0.0.1:9200`` - use elasticsearch driver for sending
7777# spans.
7778# * ``jaeger://127.0.0.1:6831`` - use jaeger tracing as driver for sending
7779# spans.
7780# (string value)
7781#connection_string = messaging://
7782
7783#
7784# Document type for notification indexing in elasticsearch.
7785# (string value)
7786#es_doc_type = notification
7787
7788#
7789# This parameter is a time value parameter (for example: es_scroll_time=2m),
7790# indicating for how long the nodes that participate in the search will maintain
7791# relevant resources in order to continue and support it.
7792# (string value)
7793#es_scroll_time = 2m
7794
7795#
7796# Elasticsearch splits large requests in batches. This parameter defines
7797# maximum size of each batch (for example: es_scroll_size=10000).
7798# (integer value)
7799#es_scroll_size = 10000
7800
7801#
7802# Redissentinel provides a timeout option on the connections.
7803# This parameter defines that timeout (for example: socket_timeout=0.1).
7804# (floating point value)
7805#socket_timeout = 0.1
7806
7807#
7808# Redissentinel uses a service name to identify a master redis service.
7809# This parameter defines the name (for example:
7810# ``sentinel_service_name=mymaster``).
7811# (string value)
7812#sentinel_service_name = mymaster
7813
7814#
7815# Enable filter traces that contain error/exception to a separated place.
7816#
7817# Default value is set to False.
7818#
7819# Possible values:
7820#
7821# * True: Enable filter traces that contain error/exception.
7822# * False: Disable the filter.
7823# (boolean value)
7824#filter_error_trace = false
7825
7826
7827[quota]
7828#
7829# Quota options allow to manage quotas in openstack deployment.
7830
7831#
7832# From nova.conf
7833#
7834
7835#
7836# The number of instances allowed per project.
7837#
7838# Possible Values
7839#
7840# * A positive integer or 0.
7841# * -1 to disable the quota.
7842# (integer value)
7843# Minimum value: -1
7844# Deprecated group/name - [DEFAULT]/quota_instances
7845#instances = 10
7846
7847#
7848# The number of instance cores or vCPUs allowed per project.
7849#
7850# Possible values:
7851#
7852# * A positive integer or 0.
7853# * -1 to disable the quota.
7854# (integer value)
7855# Minimum value: -1
7856# Deprecated group/name - [DEFAULT]/quota_cores
7857#cores = 20
7858
7859#
7860# The number of megabytes of instance RAM allowed per project.
7861#
7862# Possible values:
7863#
7864# * A positive integer or 0.
7865# * -1 to disable the quota.
7866# (integer value)
7867# Minimum value: -1
7868# Deprecated group/name - [DEFAULT]/quota_ram
7869#ram = 51200
7870
7871# DEPRECATED:
7872# The number of floating IPs allowed per project.
7873#
7874# Floating IPs are not allocated to instances by default. Users need to select
7875# them from the pool configured by the OpenStack administrator to attach to
7876# their
7877# instances.
7878#
7879# Possible values:
7880#
7881# * A positive integer or 0.
7882# * -1 to disable the quota.
7883# (integer value)
7884# Minimum value: -1
7885# Deprecated group/name - [DEFAULT]/quota_floating_ips
7886# This option is deprecated for removal since 15.0.0.
7887# Its value may be silently ignored in the future.
7888# Reason:
7889# nova-network is deprecated, as are any related configuration options.
7890#floating_ips = 10
7891
7892# DEPRECATED:
7893# The number of fixed IPs allowed per project.
7894#
7895# Unlike floating IPs, fixed IPs are allocated dynamically by the network
7896# component when instances boot up. This quota value should be at least the
7897# number of instances allowed.
7898#
7899# Possible values:
7900#
7901# * A positive integer or 0.
7902# * -1 to disable the quota.
7903# (integer value)
7904# Minimum value: -1
7905# Deprecated group/name - [DEFAULT]/quota_fixed_ips
7906# This option is deprecated for removal since 15.0.0.
7907# Its value may be silently ignored in the future.
7908# Reason:
7909# nova-network is deprecated, as are any related configuration options.
7910#fixed_ips = -1
7911
7912#
7913# The number of metadata items allowed per instance.
7914#
7915# Users can associate metadata with an instance during instance creation. This
7916# metadata takes the form of key-value pairs.
7917#
7918# Possible values:
7919#
7920# * A positive integer or 0.
7921# * -1 to disable the quota.
7922# (integer value)
7923# Minimum value: -1
7924# Deprecated group/name - [DEFAULT]/quota_metadata_items
7925#metadata_items = 128
7926
7927#
7928# The number of injected files allowed.
7929#
7930# File injection allows users to customize the personality of an instance by
7931# injecting data into it upon boot. Only text file injection is permitted:
7932# binary
7933# or ZIP files are not accepted. During file injection, any existing files that
7934# match specified files are renamed to include ``.bak`` extension appended with
7935# a
7936# timestamp.
7937#
7938# Possible values:
7939#
7940# * A positive integer or 0.
7941# * -1 to disable the quota.
7942# (integer value)
7943# Minimum value: -1
7944# Deprecated group/name - [DEFAULT]/quota_injected_files
7945#injected_files = 5
7946
7947#
7948# The number of bytes allowed per injected file.
7949#
7950# Possible values:
7951#
7952# * A positive integer or 0.
7953# * -1 to disable the quota.
7954# (integer value)
7955# Minimum value: -1
7956# Deprecated group/name - [DEFAULT]/quota_injected_file_content_bytes
7957#injected_file_content_bytes = 10240
7958
7959#
7960# The maximum allowed injected file path length.
7961#
7962# Possible values:
7963#
7964# * A positive integer or 0.
7965# * -1 to disable the quota.
7966# (integer value)
7967# Minimum value: -1
7968# Deprecated group/name - [DEFAULT]/quota_injected_file_path_length
7969#injected_file_path_length = 255
7970
7971# DEPRECATED:
7972# The number of security groups per project.
7973#
7974# Possible values:
7975#
7976# * A positive integer or 0.
7977# * -1 to disable the quota.
7978# (integer value)
7979# Minimum value: -1
7980# Deprecated group/name - [DEFAULT]/quota_security_groups
7981# This option is deprecated for removal since 15.0.0.
7982# Its value may be silently ignored in the future.
7983# Reason:
7984# nova-network is deprecated, as are any related configuration options.
7985#security_groups = 10
7986
7987# DEPRECATED:
7988# The number of security rules per security group.
7989#
7990# The associated rules in each security group control the traffic to instances
7991# in
7992# the group.
7993#
7994# Possible values:
7995#
7996# * A positive integer or 0.
7997# * -1 to disable the quota.
7998# (integer value)
7999# Minimum value: -1
8000# Deprecated group/name - [DEFAULT]/quota_security_group_rules
8001# This option is deprecated for removal since 15.0.0.
8002# Its value may be silently ignored in the future.
8003# Reason:
8004# nova-network is deprecated, as are any related configuration options.
8005#security_group_rules = 20
8006
8007#
8008# The maximum number of key pairs allowed per user.
8009#
8010# Users can create at least one key pair for each project and use the key pair
8011# for multiple instances that belong to that project.
8012#
8013# Possible values:
8014#
8015# * A positive integer or 0.
8016# * -1 to disable the quota.
8017# (integer value)
8018# Minimum value: -1
8019# Deprecated group/name - [DEFAULT]/quota_key_pairs
8020#key_pairs = 100
8021
8022#
8023# The maximum number of server groups per project.
8024#
8025# Server groups are used to control the affinity and anti-affinity scheduling
8026# policy for a group of servers or instances. Reducing the quota will not affect
8027# any existing group, but new servers will not be allowed into groups that have
8028# become over quota.
8029#
8030# Possible values:
8031#
8032# * A positive integer or 0.
8033# * -1 to disable the quota.
8034# (integer value)
8035# Minimum value: -1
8036# Deprecated group/name - [DEFAULT]/quota_server_groups
8037#server_groups = 10
8038
8039#
8040# The maximum number of servers per server group.
8041#
8042# Possible values:
8043#
8044# * A positive integer or 0.
8045# * -1 to disable the quota.
8046# (integer value)
8047# Minimum value: -1
8048# Deprecated group/name - [DEFAULT]/quota_server_group_members
8049#server_group_members = 10
8050
8051#
8052# The number of seconds until a reservation expires.
8053#
8054# This quota represents the time period for invalidating quota reservations.
8055# (integer value)
8056#reservation_expire = 86400
8057
8058#
8059# The count of reservations until usage is refreshed.
8060#
8061# This defaults to 0 (off) to avoid additional load but it is useful to turn on
8062# to help keep quota usage up-to-date and reduce the impact of out of sync usage
8063# issues.
8064# (integer value)
8065# Minimum value: 0
8066#until_refresh = 0
8067
8068#
8069# The number of seconds between subsequent usage refreshes.
8070#
8071# This defaults to 0 (off) to avoid additional load but it is useful to turn on
8072# to help keep quota usage up-to-date and reduce the impact of out of sync usage
8073# issues. Note that quotas are not updated on a periodic task, they will update
8074# on a new reservation if max_age has passed since the last reservation.
8075# (integer value)
8076# Minimum value: 0
8077#max_age = 0
8078
8079#
8080# Provides abstraction for quota checks. Users can configure a specific
8081# driver to use for quota checks.
8082#
8083# Possible values:
8084#
8085# * nova.quota.DbQuotaDriver: Stores quota limit information
8086# in the database and relies on the quota_* configuration options for default
8087# quota limit values. Counts quota usage on-demand.
8088# * nova.quota.NoopQuotaDriver: Ignores quota and treats all resources as
8089# unlimited.
8090# (string value)
8091# Possible values:
8092# nova.quota.DbQuotaDriver - <No description provided>
8093# nova.quota.NoopQuotaDriver - <No description provided>
8094#driver = nova.quota.DbQuotaDriver
8095
8096#
8097# Recheck quota after resource creation to prevent allowing quota to be
8098# exceeded.
8099#
8100# This defaults to True (recheck quota after resource creation) but can be set
8101# to
8102# False to avoid additional load if allowing quota to be exceeded because of
8103# racing requests is considered acceptable. For example, when set to False, if a
8104# user makes highly parallel REST API requests to create servers, it will be
8105# possible for them to create more servers than their allowed quota during the
8106# race. If their quota is 10 servers, they might be able to create 50 during the
8107# burst. After the burst, they will not be able to create any more servers but
8108# they will be able to keep their 50 servers until they delete them.
8109#
8110# The initial quota check is done before resources are created, so if multiple
8111# parallel requests arrive at the same time, all could pass the quota check and
8112# create resources, potentially exceeding quota. When recheck_quota is True,
8113# quota will be checked a second time after resources have been created and if
8114# the resource is over quota, it will be deleted and OverQuota will be raised,
8115# usually resulting in a 403 response to the REST API user. This makes it
8116# impossible for a user to exceed their quota with the caveat that it will,
8117# however, be possible for a REST API user to be rejected with a 403 response in
8118# the event of a collision close to reaching their quota limit, even if the user
8119# has enough quota available when they made the request.
8120# (boolean value)
8121#recheck_quota = true
8122
8123
8124[rdp]
8125#
8126# Options under this group enable and configure Remote Desktop Protocol (
8127# RDP) related features.
8128#
8129# This group is only relevant to Hyper-V users.
8130
8131#
8132# From nova.conf
8133#
8134
8135#
8136# Enable Remote Desktop Protocol (RDP) related features.
8137#
8138# Hyper-V, unlike the majority of the hypervisors employed on Nova compute
8139# nodes, uses RDP instead of VNC and SPICE as a desktop sharing protocol to
8140# provide instance console access. This option enables RDP for graphical
8141# console access for virtual machines created by Hyper-V.
8142#
8143# **Note:** RDP should only be enabled on compute nodes that support the Hyper-V
8144# virtualization platform.
8145#
8146# Related options:
8147#
8148# * ``compute_driver``: Must be hyperv.
8149#
8150# (boolean value)
8151#enabled = false
8152
8153#
8154# The URL an end user would use to connect to the RDP HTML5 console proxy.
8155# The console proxy service is called with this token-embedded URL and
8156# establishes the connection to the proper instance.
8157#
8158# An RDP HTML5 console proxy service will need to be configured to listen on the
8159# address configured here. Typically the console proxy service would be run on a
8160# controller node. The localhost address used as default would only work in a
8161# single node environment i.e. devstack.
8162#
8163# An RDP HTML5 proxy allows a user to access via the web the text or graphical
8164# console of any Windows server or workstation using RDP. RDP HTML5 console
8165# proxy services include FreeRDP, wsgate.
8166# See https://github.com/FreeRDP/FreeRDP-WebConnect
8167#
8168# Possible values:
8169#
8170# * <scheme>://<ip-address>:<port-number>/
8171#
8172# The scheme must be identical to the scheme configured for the RDP HTML5
8173# console proxy service. It is ``http`` or ``https``.
8174#
8175# The IP address must be identical to the address on which the RDP HTML5
8176# console proxy service is listening.
8177#
8178# The port must be identical to the port on which the RDP HTML5 console proxy
8179# service is listening.
8180#
8181# Related options:
8182#
8183# * ``rdp.enabled``: Must be set to ``True`` for ``html5_proxy_base_url`` to be
8184# effective.
8185# (uri value)
8186#html5_proxy_base_url = http://127.0.0.1:6083/
8187
8188
8189[remote_debug]
8190
8191#
8192# From nova.conf
8193#
8194
8195#
8196# Debug host (IP or name) to connect to. This command line parameter is used
8197# when
8198# you want to connect to a nova service via a debugger running on a different
8199# host.
8200#
8201# Note that using the remote debug option changes how Nova uses the eventlet
8202# library to support async IO. This could result in failures that do not occur
8203# under normal operation. Use at your own risk.
8204#
8205# Possible Values:
8206#
8207# * IP address of a remote host as a command line parameter
8208# to a nova service. For Example:
8209#
8210# /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf
8211# --remote_debug-host <IP address where the debugger is running>
8212# (host address value)
8213#host = <None>
8214
8215#
8216# Debug port to connect to. This command line parameter allows you to specify
8217# the port you want to use to connect to a nova service via a debugger running
8218# on different host.
8219#
8220# Note that using the remote debug option changes how Nova uses the eventlet
8221# library to support async IO. This could result in failures that do not occur
8222# under normal operation. Use at your own risk.
8223#
8224# Possible Values:
8225#
8226# * Port number you want to use as a command line parameter
8227# to a nova service. For Example:
8228#
8229# /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf
8230# --remote_debug-host <IP address where the debugger is running>
8231# --remote_debug-port <port the debugger is listening on>.
8232# (port value)
8233# Minimum value: 0
8234# Maximum value: 65535
8235#port = <None>
8236
8237
8238[scheduler]
8239
8240#
8241# From nova.conf
8242#
8243
8244#
8245# The class of the driver used by the scheduler. This should be chosen from one
8246# of the entrypoints under the namespace 'nova.scheduler.driver' of file
8247# 'setup.cfg'. If nothing is specified in this option, the 'filter_scheduler' is
8248# used.
8249#
8250# Other options are:
8251#
8252# * 'caching_scheduler' which aggressively caches the system state for better
8253# individual scheduler performance at the risk of more retries when running
8254# multiple schedulers. [DEPRECATED]
8255# * 'chance_scheduler' which simply picks a host at random. [DEPRECATED]
8256# * 'fake_scheduler' which is used for testing.
8257#
8258# Possible values:
8259#
8260# * Any of the drivers included in Nova:
8261#
8262# * filter_scheduler
8263# * caching_scheduler
8264# * chance_scheduler
8265# * fake_scheduler
8266#
8267# * You may also set this to the entry point name of a custom scheduler driver,
8268# but you will be responsible for creating and maintaining it in your
8269# setup.cfg
8270# file.
8271#
8272# Related options:
8273#
8274# * workers
8275# (string value)
8276# Deprecated group/name - [DEFAULT]/scheduler_driver
8277#driver = filter_scheduler
8278
8279#
8280# Periodic task interval.
8281#
8282# This value controls how often (in seconds) to run periodic tasks in the
8283# scheduler. The specific tasks that are run for each period are determined by
8284# the particular scheduler being used. Currently the only in-tree scheduler
8285# driver that uses this option is the ``caching_scheduler``.
8286#
8287# If this is larger than the nova-service 'service_down_time' setting, the
8288# ComputeFilter (if enabled) may think the compute service is down. As each
8289# scheduler can work a little differently than the others, be sure to test this
8290# with your selected scheduler.
8291#
8292# Possible values:
8293#
8294# * An integer, where the integer corresponds to periodic task interval in
8295# seconds. 0 uses the default interval (60 seconds). A negative value disables
8296# periodic tasks.
8297#
8298# Related options:
8299#
8300# * ``nova-service service_down_time``
8301# (integer value)
8302#periodic_task_interval = 60
8303
8304#
8305# This is the maximum number of attempts that will be made for a given instance
8306# build/move operation. It limits the number of alternate hosts returned by the
8307# scheduler. When that list of hosts is exhausted, a MaxRetriesExceeded
8308# exception is raised and the instance is set to an error state.
8309#
8310# Possible values:
8311#
8312# * A positive integer, where the integer corresponds to the max number of
8313# attempts that can be made when building or moving an instance.
8314# (integer value)
8315# Minimum value: 1
8316# Deprecated group/name - [DEFAULT]/scheduler_max_attempts
8317#max_attempts = 3
8318
8319#
8320# Periodic task interval.
8321#
8322# This value controls how often (in seconds) the scheduler should attempt
8323# to discover new hosts that have been added to cells. If negative (the
8324# default), no automatic discovery will occur.
8325#
8326# Deployments where compute nodes come and go frequently may want this
8327# enabled, where others may prefer to manually discover hosts when one
8328# is added to avoid any overhead from constantly checking. If enabled,
8329# every time this runs, we will select any unmapped hosts out of each
8330# cell database on every run.
8331# (integer value)
8332# Minimum value: -1
8333#discover_hosts_in_cells_interval = -1
8334
8335#
8336# This setting determines the maximum limit on results received from the
8337# placement service during a scheduling operation. It effectively limits
8338# the number of hosts that may be considered for scheduling requests that
8339# match a large number of candidates.
8340#
8341# A value of 1 (the minimum) will effectively defer scheduling to the placement
8342# service strictly on "will it fit" grounds. A higher value will put an upper
8343# cap on the number of results the scheduler will consider during the filtering
8344# and weighing process. Large deployments may need to set this lower than the
8345# total number of hosts available to limit memory consumption, network traffic,
8346# etc. of the scheduler.
8347#
8348# This option is only used by the FilterScheduler; if you use a different
8349# scheduler, this option has no effect.
8350# (integer value)
8351# Minimum value: 1
8352#max_placement_results = 1000
8353
8354#
8355# Number of workers for the nova-scheduler service. The default will be the
8356# number of CPUs available if using the "filter_scheduler" scheduler driver,
8357# otherwise the default will be 1.
8358# (integer value)
8359# Minimum value: 0
8360#workers = <None>
8361
8362#
8363# This setting causes the scheduler to look up a host aggregate with the
8364# metadata key of `filter_tenant_id` set to the project of an incoming
8365# request, and request results from placement be limited to that aggregate.
8366# Multiple tenants may be added to a single aggregate by appending a serial
8367# number to the key, such as `filter_tenant_id:123`.
8368#
8369# The matching aggregate UUID must be mirrored in placement for proper
8370# operation. If no host aggregate with the tenant id is found, or that
8371# aggregate does not match one in placement, the result will be the same
8372# as not finding any suitable hosts for the request.
8373#
8374# See also the placement_aggregate_required_for_tenants option.
8375# (boolean value)
8376#limit_tenants_to_placement_aggregate = false
8377
8378#
8379# This setting, when limit_tenants_to_placement_aggregate=True, will control
8380# whether or not a tenant with no aggregate affinity will be allowed to schedule
8381# to any available node. If aggregates are used to limit some tenants but
8382# not all, then this should be False. If all tenants should be confined via
8383# aggregate, then this should be True to prevent them from receiving
8384# unrestricted
8385# scheduling to any available node.
8386#
8387# See also the limit_tenants_to_placement_aggregate option.
8388# (boolean value)
8389#placement_aggregate_required_for_tenants = false
8390
8391#
8392# This setting causes the scheduler to look up a host aggregate with the
8393# metadata key of `availability_zone` set to the value provided by an
8394# incoming request, and request results from placement be limited to that
8395# aggregate.
8396#
8397# The matching aggregate UUID must be mirrored in placement for proper
8398# operation. If no host aggregate with the `availability_zone` key is
8399# found, or that aggregate does not match one in placement, the result will
8400# be the same as not finding any suitable hosts.
8401#
8402# Note that if you enable this flag, you can disable the (less efficient)
8403# AvailabilityZoneFilter in the scheduler.
8404# (boolean value)
8405#query_placement_for_availability_zone = false
8406
8407
8408[serial_console]
8409#
8410# The serial console feature allows you to connect to a guest in case a
8411# graphical console like VNC, RDP or SPICE is not available. This is only
8412# currently supported for the libvirt, Ironic and hyper-v drivers.
8413
8414#
8415# From nova.conf
8416#
8417
8418#
8419# Enable the serial console feature.
8420#
8421# In order to use this feature, the service ``nova-serialproxy`` needs to run.
8422# This service is typically executed on the controller node.
8423# (boolean value)
8424#enabled = false
8425
8426#
8427# A range of TCP ports a guest can use for its backend.
8428#
8429# Each instance which gets created will use one port out of this range. If the
8430# range is not big enough to provide another port for an new instance, this
8431# instance won't get launched.
8432#
8433# Possible values:
8434#
8435# * Each string which passes the regex ``\d+:\d+`` For example ``10000:20000``.
8436# Be sure that the first port number is lower than the second port number
8437# and that both are in range from 0 to 65535.
8438# (string value)
8439#port_range = 10000:20000
8440
8441#
8442# The URL an end user would use to connect to the ``nova-serialproxy`` service.
8443#
8444# The ``nova-serialproxy`` service is called with this token enriched URL
8445# and establishes the connection to the proper instance.
8446#
8447# Related options:
8448#
8449# * The IP address must be identical to the address to which the
8450# ``nova-serialproxy`` service is listening (see option ``serialproxy_host``
8451# in this section).
8452# * The port must be the same as in the option ``serialproxy_port`` of this
8453# section.
8454# * If you choose to use a secured websocket connection, then start this option
8455# with ``wss://`` instead of the unsecured ``ws://``. The options ``cert``
8456# and ``key`` in the ``[DEFAULT]`` section have to be set for that.
8457# (uri value)
8458#base_url = ws://127.0.0.1:6083/
8459
8460#
8461# The IP address to which proxy clients (like ``nova-serialproxy``) should
8462# connect to get the serial console of an instance.
8463#
8464# This is typically the IP address of the host of a ``nova-compute`` service.
8465# (string value)
8466#proxyclient_address = 127.0.0.1
8467
8468#
8469# The IP address which is used by the ``nova-serialproxy`` service to listen
8470# for incoming requests.
8471#
8472# The ``nova-serialproxy`` service listens on this IP address for incoming
8473# connection requests to instances which expose serial console.
8474#
8475# Related options:
8476#
8477# * Ensure that this is the same IP address which is defined in the option
8478# ``base_url`` of this section or use ``0.0.0.0`` to listen on all addresses.
8479# (string value)
8480#serialproxy_host = 0.0.0.0
8481
8482#
8483# The port number which is used by the ``nova-serialproxy`` service to listen
8484# for incoming requests.
8485#
8486# The ``nova-serialproxy`` service listens on this port number for incoming
8487# connection requests to instances which expose serial console.
8488#
8489# Related options:
8490#
8491# * Ensure that this is the same port number which is defined in the option
8492# ``base_url`` of this section.
8493# (port value)
8494# Minimum value: 0
8495# Maximum value: 65535
8496#serialproxy_port = 6083
8497
8498
8499[service_user]
8500#
8501# Configuration options for service to service authentication using a service
8502# token. These options allow sending a service token along with the user's token
8503# when contacting external REST APIs.
8504
8505#
8506# From nova.conf
8507#
8508
8509#
8510# When True, if sending a user token to a REST API, also send a service token.
8511#
8512# Nova often reuses the user token provided to the nova-api to talk to other
8513# REST
8514# APIs, such as Cinder, Glance and Neutron. It is possible that while the user
8515# token was valid when the request was made to Nova, the token may expire before
8516# it reaches the other service. To avoid any failures, and to make it clear it
8517# is
8518# Nova calling the service on the user's behalf, we include a service token
8519# along
8520# with the user token. Should the user's token have expired, a valid service
8521# token ensures the REST API request will still be accepted by the keystone
8522# middleware.
8523# (boolean value)
8524#send_service_user_token = false
Oleksandr Bryndzii6d821f52019-02-20 15:51:15 +02008525{%- if compute.get('service_user', {}).get('enabled', True) %}
8526send_service_user_token = True
Oleksandr Bryndziif4f91a12019-03-04 15:14:48 +02008527{%- set _data = {} %}
8528{%- do _data.update(compute.get('identity', {})) %}
8529{%- do _data.update(compute.get('service_user', {})) %}
Oleksandr Bryndzii6d821f52019-02-20 15:51:15 +02008530{%- if not _data.port == '5000' %}{% do _data.update({'port': '5000'}) %}{% endif %}
Oleksandr Bryndziif4f91a12019-03-04 15:14:48 +02008531{%- if 'cacert_file' not in _data.keys() %}{% do _data.update({'cacert_file': compute.cacert_file}) %}{% endif %}
8532{%- include "oslo_templates/files/" ~ compute.version ~ "/keystoneauth/_type_"+ _data.get('auth_type','password') +".conf" %}
Oleksandr Bryndzii6d821f52019-02-20 15:51:15 +02008533{%- else %}
Michael Polenchukf37e5b62018-11-28 17:55:45 +04008534
8535# PEM encoded Certificate Authority to use when verifying HTTPs connections.
8536# (string value)
8537#cafile = <None>
8538
8539# PEM encoded client certificate cert file (string value)
8540#certfile = <None>
8541
8542# PEM encoded client certificate key file (string value)
8543#keyfile = <None>
8544
8545# Verify HTTPS connections. (boolean value)
8546#insecure = false
8547
8548# Timeout value for http requests (integer value)
8549#timeout = <None>
8550
8551# Collect per-API call timing information. (boolean value)
8552#collect_timing = false
8553
8554# Log requests to multiple loggers. (boolean value)
8555#split_loggers = false
8556
8557# Authentication type to load (string value)
8558# Deprecated group/name - [service_user]/auth_plugin
8559#auth_type = <None>
8560
8561# Config Section from which to load plugin specific options (string value)
8562#auth_section = <None>
8563
8564# Authentication URL (string value)
8565#auth_url = <None>
8566
8567# Scope for system operations (string value)
8568#system_scope = <None>
8569
8570# Domain ID to scope to (string value)
8571#domain_id = <None>
8572
8573# Domain name to scope to (string value)
8574#domain_name = <None>
8575
8576# Project ID to scope to (string value)
8577#project_id = <None>
8578
8579# Project name to scope to (string value)
8580#project_name = <None>
8581
8582# Domain ID containing project (string value)
8583#project_domain_id = <None>
8584
8585# Domain name containing project (string value)
8586#project_domain_name = <None>
8587
8588# Trust ID (string value)
8589#trust_id = <None>
8590
8591# Optional domain ID to use with v3 and v2 parameters. It will be used for both
8592# the user and project domain in v3 and ignored in v2 authentication. (string
8593# value)
8594#default_domain_id = <None>
8595
8596# Optional domain name to use with v3 API and v2 parameters. It will be used for
8597# both the user and project domain in v3 and ignored in v2 authentication.
8598# (string value)
8599#default_domain_name = <None>
8600
8601# User ID (string value)
8602#user_id = <None>
8603
8604# Username (string value)
8605# Deprecated group/name - [service_user]/user_name
8606#username = <None>
8607
8608# User's domain id (string value)
8609#user_domain_id = <None>
8610
8611# User's domain name (string value)
8612#user_domain_name = <None>
8613
8614# User's password (string value)
8615#password = <None>
8616
8617# Tenant ID (string value)
8618#tenant_id = <None>
8619
8620# Tenant Name (string value)
8621#tenant_name = <None>
Oleksandr Bryndzii6d821f52019-02-20 15:51:15 +02008622{%- endif %}
Michael Polenchukf37e5b62018-11-28 17:55:45 +04008623
8624
8625[spice]
8626#
8627# SPICE console feature allows you to connect to a guest virtual machine.
8628# SPICE is a replacement for fairly limited VNC protocol.
8629#
8630# Following requirements must be met in order to use SPICE:
8631#
8632# * Virtualization driver must be libvirt
8633# * spice.enabled set to True
8634# * vnc.enabled set to False
8635# * update html5proxy_base_url
8636# * update server_proxyclient_address
8637
8638#
8639# From nova.conf
8640#
8641
8642#
8643# Enable SPICE related features.
8644#
8645# Related options:
8646#
8647# * VNC must be explicitly disabled to get access to the SPICE console. Set the
8648# enabled option to False in the [vnc] section to disable the VNC console.
8649# (boolean value)
8650#enabled = false
8651
8652#
8653# Enable the SPICE guest agent support on the instances.
8654#
8655# The Spice agent works with the Spice protocol to offer a better guest console
8656# experience. However, the Spice console can still be used without the Spice
8657# Agent. With the Spice agent installed the following features are enabled:
8658#
8659# * Copy & Paste of text and images between the guest and client machine
8660# * Automatic adjustment of resolution when the client screen changes - e.g.
8661# if you make the Spice console full screen the guest resolution will adjust
8662# to
8663# match it rather than letterboxing.
8664# * Better mouse integration - The mouse can be captured and released without
8665# needing to click inside the console or press keys to release it. The
8666# performance of mouse movement is also improved.
8667# (boolean value)
8668#agent_enabled = true
8669
8670#
8671# Location of the SPICE HTML5 console proxy.
8672#
8673# End user would use this URL to connect to the ``nova-spicehtml5proxy``
8674# service. This service will forward request to the console of an instance.
8675#
8676# In order to use SPICE console, the service ``nova-spicehtml5proxy`` should be
8677# running. This service is typically launched on the controller node.
8678#
8679# Possible values:
8680#
8681# * Must be a valid URL of the form: ``http://host:port/spice_auto.html``
8682# where host is the node running ``nova-spicehtml5proxy`` and the port is
8683# typically 6082. Consider not using default value as it is not well defined
8684# for any real deployment.
8685#
8686# Related options:
8687#
8688# * This option depends on ``html5proxy_host`` and ``html5proxy_port`` options.
8689# The access URL returned by the compute node must have the host
8690# and port where the ``nova-spicehtml5proxy`` service is listening.
8691# (uri value)
8692#html5proxy_base_url = http://127.0.0.1:6082/spice_auto.html
8693{%- if compute.vncproxy_url is defined %}
8694html5proxy_base_url = {{ compute.vncproxy_url }}/spice_auto.html
8695{%- endif %}
8696
8697#
8698# The address where the SPICE server running on the instances should listen.
8699#
8700# Typically, the ``nova-spicehtml5proxy`` proxy client runs on the controller
8701# node and connects over the private network to this address on the compute
8702# node(s).
8703#
8704# Possible values:
8705#
8706# * IP address to listen on.
8707# (string value)
8708#server_listen = 127.0.0.1
8709
8710#
8711# The address used by ``nova-spicehtml5proxy`` client to connect to instance
8712# console.
8713#
8714# Typically, the ``nova-spicehtml5proxy`` proxy client runs on the
8715# controller node and connects over the private network to this address on the
8716# compute node(s).
8717#
8718# Possible values:
8719#
8720# * Any valid IP address on the compute node.
8721#
8722# Related options:
8723#
8724# * This option depends on the ``server_listen`` option.
8725# The proxy client must be able to access the address specified in
8726# ``server_listen`` using the value of this option.
8727# (string value)
8728#server_proxyclient_address = 127.0.0.1
8729
8730# DEPRECATED:
8731# A keyboard layout which is supported by the underlying hypervisor on this
8732# node.
8733#
8734# Possible values:
8735#
8736# * This is usually an 'IETF language tag' (default is 'en-us'). If you
8737# use QEMU as hypervisor, you should find the list of supported keyboard
8738# layouts at /usr/share/qemu/keymaps.
8739# (string value)
8740# This option is deprecated for removal since 18.0.0.
8741# Its value may be silently ignored in the future.
8742# Reason:
8743# Configuring this option forces QEMU to do keymap conversions. These
8744# conversions
8745# are lossy and can result in significant issues for users of non en-US
8746# keyboards. Refer to bug #1682020 for more information.
8747#keymap = <None>
8748
8749#
8750# IP address or a hostname on which the ``nova-spicehtml5proxy`` service
8751# listens for incoming requests.
8752#
8753# Related options:
8754#
8755# * This option depends on the ``html5proxy_base_url`` option.
8756# The ``nova-spicehtml5proxy`` service must be listening on a host that is
8757# accessible from the HTML5 client.
8758# (host address value)
8759#html5proxy_host = 0.0.0.0
8760
8761#
8762# Port on which the ``nova-spicehtml5proxy`` service listens for incoming
8763# requests.
8764#
8765# Related options:
8766#
8767# * This option depends on the ``html5proxy_base_url`` option.
8768# The ``nova-spicehtml5proxy`` service must be listening on a port that is
8769# accessible from the HTML5 client.
8770# (port value)
8771# Minimum value: 0
8772# Maximum value: 65535
8773#html5proxy_port = 6082
8774
8775
8776[upgrade_levels]
8777#
8778# upgrade_levels options are used to set version cap for RPC
8779# messages sent between different nova services.
8780#
8781# By default all services send messages using the latest version
8782# they know about.
8783#
8784# The compute upgrade level is an important part of rolling upgrades
8785# where old and new nova-compute services run side by side.
8786#
8787# The other options can largely be ignored, and are only kept to
8788# help with a possible future backport issue.
8789
8790#
8791# From nova.conf
8792#
8793
8794#
8795# Compute RPC API version cap.
8796#
8797# By default, we always send messages using the most recent version
8798# the client knows about.
8799#
8800# Where you have old and new compute services running, you should set
8801# this to the lowest deployed version. This is to guarantee that all
8802# services never send messages that one of the compute nodes can't
8803# understand. Note that we only support upgrading from release N to
8804# release N+1.
8805#
8806# Set this option to "auto" if you want to let the compute RPC module
8807# automatically determine what version to use based on the service
8808# versions in the deployment.
8809#
8810# Possible values:
8811#
8812# * By default send the latest version the client knows about
8813# * 'auto': Automatically determines what version to use based on
8814# the service versions in the deployment.
8815# * A string representing a version number in the format 'N.N';
8816# for example, possible values might be '1.12' or '2.0'.
8817# * An OpenStack release name, in lower case, such as 'mitaka' or
8818# 'liberty'.
8819# (string value)
8820#compute = <None>
8821
8822#
8823# Cells RPC API version cap.
8824#
8825# Possible values:
8826#
8827# * By default send the latest version the client knows about
8828# * A string representing a version number in the format 'N.N';
8829# for example, possible values might be '1.12' or '2.0'.
8830# * An OpenStack release name, in lower case, such as 'mitaka' or
8831# 'liberty'.
8832# (string value)
8833#cells = <None>
8834
8835#
8836# Intercell RPC API version cap.
8837#
8838# Possible values:
8839#
8840# * By default send the latest version the client knows about
8841# * A string representing a version number in the format 'N.N';
8842# for example, possible values might be '1.12' or '2.0'.
8843# * An OpenStack release name, in lower case, such as 'mitaka' or
8844# 'liberty'.
8845# (string value)
8846#intercell = <None>
8847
8848# DEPRECATED:
8849# Cert RPC API version cap.
8850#
8851# Possible values:
8852#
8853# * By default send the latest version the client knows about
8854# * A string representing a version number in the format 'N.N';
8855# for example, possible values might be '1.12' or '2.0'.
8856# * An OpenStack release name, in lower case, such as 'mitaka' or
8857# 'liberty'.
8858# (string value)
8859# This option is deprecated for removal since 18.0.0.
8860# Its value may be silently ignored in the future.
8861# Reason:
8862# The nova-cert service was removed in 16.0.0 (Pike) so this option
8863# is no longer used.
8864#cert = <None>
8865
8866#
8867# Scheduler RPC API version cap.
8868#
8869# Possible values:
8870#
8871# * By default send the latest version the client knows about
8872# * A string representing a version number in the format 'N.N';
8873# for example, possible values might be '1.12' or '2.0'.
8874# * An OpenStack release name, in lower case, such as 'mitaka' or
8875# 'liberty'.
8876# (string value)
8877#scheduler = <None>
8878
8879#
8880# Conductor RPC API version cap.
8881#
8882# Possible values:
8883#
8884# * By default send the latest version the client knows about
8885# * A string representing a version number in the format 'N.N';
8886# for example, possible values might be '1.12' or '2.0'.
8887# * An OpenStack release name, in lower case, such as 'mitaka' or
8888# 'liberty'.
8889# (string value)
8890#conductor = <None>
8891
8892#
8893# Console RPC API version cap.
8894#
8895# Possible values:
8896#
8897# * By default send the latest version the client knows about
8898# * A string representing a version number in the format 'N.N';
8899# for example, possible values might be '1.12' or '2.0'.
8900# * An OpenStack release name, in lower case, such as 'mitaka' or
8901# 'liberty'.
8902# (string value)
8903#console = <None>
8904
8905# DEPRECATED:
8906# Consoleauth RPC API version cap.
8907#
8908# Possible values:
8909#
8910# * By default send the latest version the client knows about
8911# * A string representing a version number in the format 'N.N';
8912# for example, possible values might be '1.12' or '2.0'.
8913# * An OpenStack release name, in lower case, such as 'mitaka' or
8914# 'liberty'.
8915# (string value)
8916# This option is deprecated for removal since 18.0.0.
8917# Its value may be silently ignored in the future.
8918# Reason:
8919# The nova-consoleauth service was deprecated in 18.0.0 (Rocky) and will be
8920# removed in an upcoming release.
8921#consoleauth = <None>
8922
8923# DEPRECATED:
8924# Network RPC API version cap.
8925#
8926# Possible values:
8927#
8928# * By default send the latest version the client knows about
8929# * A string representing a version number in the format 'N.N';
8930# for example, possible values might be '1.12' or '2.0'.
8931# * An OpenStack release name, in lower case, such as 'mitaka' or
8932# 'liberty'.
8933# (string value)
8934# This option is deprecated for removal since 18.0.0.
8935# Its value may be silently ignored in the future.
8936# Reason:
8937# The nova-network service was deprecated in 14.0.0 (Newton) and will be
8938# removed in an upcoming release.
8939#network = <None>
8940
8941#
8942# Base API RPC API version cap.
8943#
8944# Possible values:
8945#
8946# * By default send the latest version the client knows about
8947# * A string representing a version number in the format 'N.N';
8948# for example, possible values might be '1.12' or '2.0'.
8949# * An OpenStack release name, in lower case, such as 'mitaka' or
8950# 'liberty'.
8951# (string value)
8952#baseapi = <None>
8953{%- if compute.upgrade_levels is defined %}
8954{%- for key, value in compute.upgrade_levels.items() %}
8955{{ key }} = {{ value }}
8956{%- endfor %}
8957{%- endif %}
8958
8959
8960[vault]
8961
8962#
8963# From nova.conf
8964#
8965
8966# root token for vault (string value)
8967#root_token_id = <None>
8968
8969# Use this endpoint to connect to Vault, for example: "http://127.0.0.1:8200"
8970# (string value)
8971#vault_url = http://127.0.0.1:8200
8972
8973# Absolute path to ca cert file (string value)
8974#ssl_ca_crt_file = <None>
8975
8976# SSL Enabled/Disabled (boolean value)
8977#use_ssl = false
8978
8979
8980[vendordata_dynamic_auth]
8981#
8982# Options within this group control the authentication of the vendordata
8983# subsystem of the metadata API server (and config drive) with external systems.
8984
8985#
8986# From nova.conf
8987#
8988
8989# PEM encoded Certificate Authority to use when verifying HTTPs connections.
8990# (string value)
8991#cafile = <None>
8992
8993# PEM encoded client certificate cert file (string value)
8994#certfile = <None>
8995
8996# PEM encoded client certificate key file (string value)
8997#keyfile = <None>
8998
8999# Verify HTTPS connections. (boolean value)
9000#insecure = false
9001
9002# Timeout value for http requests (integer value)
9003#timeout = <None>
9004
9005# Collect per-API call timing information. (boolean value)
9006#collect_timing = false
9007
9008# Log requests to multiple loggers. (boolean value)
9009#split_loggers = false
9010
9011# Authentication type to load (string value)
9012# Deprecated group/name - [vendordata_dynamic_auth]/auth_plugin
9013#auth_type = <None>
9014
9015# Config Section from which to load plugin specific options (string value)
9016#auth_section = <None>
9017
9018# Authentication URL (string value)
9019#auth_url = <None>
9020
9021# Scope for system operations (string value)
9022#system_scope = <None>
9023
9024# Domain ID to scope to (string value)
9025#domain_id = <None>
9026
9027# Domain name to scope to (string value)
9028#domain_name = <None>
9029
9030# Project ID to scope to (string value)
9031#project_id = <None>
9032
9033# Project name to scope to (string value)
9034#project_name = <None>
9035
9036# Domain ID containing project (string value)
9037#project_domain_id = <None>
9038
9039# Domain name containing project (string value)
9040#project_domain_name = <None>
9041
9042# Trust ID (string value)
9043#trust_id = <None>
9044
9045# Optional domain ID to use with v3 and v2 parameters. It will be used for both
9046# the user and project domain in v3 and ignored in v2 authentication. (string
9047# value)
9048#default_domain_id = <None>
9049
9050# Optional domain name to use with v3 API and v2 parameters. It will be used for
9051# both the user and project domain in v3 and ignored in v2 authentication.
9052# (string value)
9053#default_domain_name = <None>
9054
9055# User ID (string value)
9056#user_id = <None>
9057
9058# Username (string value)
9059# Deprecated group/name - [vendordata_dynamic_auth]/user_name
9060#username = <None>
9061
9062# User's domain id (string value)
9063#user_domain_id = <None>
9064
9065# User's domain name (string value)
9066#user_domain_name = <None>
9067
9068# User's password (string value)
9069#password = <None>
9070
9071# Tenant ID (string value)
9072#tenant_id = <None>
9073
9074# Tenant Name (string value)
9075#tenant_name = <None>
9076
9077{%- set compute_driver = compute.get('compute_driver', 'libvirt.LibvirtDriver') %}
9078{%- if compute_driver in compute_driver_mapping.keys() %}
9079{%- set _data = compute.get(compute_driver_mapping[compute_driver]) %}
9080{%- include "nova/files/" ~ compute.version ~ "/compute/_" ~ compute_driver_mapping[compute_driver] ~ ".conf" %}
9081{%- endif %}
9082
9083
9084[vnc]
9085#
9086# Virtual Network Computer (VNC) can be used to provide remote desktop
9087# console access to instances for tenants and/or administrators.
9088
9089#
9090# From nova.conf
9091#
9092
9093#
9094# Enable VNC related features.
9095#
9096# Guests will get created with graphical devices to support this. Clients
9097# (for example Horizon) can then establish a VNC connection to the guest.
9098# (boolean value)
9099# Deprecated group/name - [DEFAULT]/vnc_enabled
9100#enabled = true
9101
9102# DEPRECATED:
9103# Keymap for VNC.
9104#
9105# The keyboard mapping (keymap) determines which keyboard layout a VNC
9106# session should use by default.
9107#
9108# Possible values:
9109#
9110# * A keyboard layout which is supported by the underlying hypervisor on
9111# this node. This is usually an 'IETF language tag' (for example
9112# 'en-us'). If you use QEMU as hypervisor, you should find the list
9113# of supported keyboard layouts at ``/usr/share/qemu/keymaps``.
9114# (string value)
9115# Deprecated group/name - [DEFAULT]/vnc_keymap
9116# This option is deprecated for removal since 18.0.0.
9117# Its value may be silently ignored in the future.
9118# Reason:
9119# Configuring this option forces QEMU to do keymap conversions. These
9120# conversions
9121# are lossy and can result in significant issues for users of non en-US
9122# keyboards. You should instead use a VNC client that supports Extended Key
9123# Event
9124# messages, such as noVNC 1.0.0. Refer to bug #1682020 for more information.
9125keymap = {{ compute.get('vnc_keymap', 'en-us') }}
9126
9127#
9128# The IP address or hostname on which an instance should listen to for
9129# incoming VNC connection requests on this node.
9130# (host address value)
9131# Deprecated group/name - [DEFAULT]/vncserver_listen
9132# Deprecated group/name - [vnc]/vncserver_listen
9133#server_listen = 127.0.0.1
9134{%- if compute.bind is defined %}
9135{%- if compute.bind.vnc_address is defined %}
9136server_listen = {{ compute.bind.vnc_address }}
9137
9138#
9139# Private, internal IP address or hostname of VNC console proxy.
9140#
9141# The VNC proxy is an OpenStack component that enables compute service
9142# users to access their instances through VNC clients.
9143#
9144# This option sets the private address to which proxy clients, such as
9145# ``nova-xvpvncproxy``, should connect to.
9146# (host address value)
9147# Deprecated group/name - [DEFAULT]/vncserver_proxyclient_address
9148# Deprecated group/name - [vnc]/vncserver_proxyclient_address
9149server_proxyclient_address = {{ compute.bind.vnc_address }}
9150{%- else %}
9151server_listen = 0.0.0.0
9152{%- endif %}
9153{%- endif %}
9154
9155#
9156# Public address of noVNC VNC console proxy.
9157#
9158# The VNC proxy is an OpenStack component that enables compute service
9159# users to access their instances through VNC clients. noVNC provides
9160# VNC support through a websocket-based client.
9161#
9162# This option sets the public base URL to which client systems will
9163# connect. noVNC clients can use this address to connect to the noVNC
9164# instance and, by extension, the VNC sessions.
9165#
9166# If using noVNC >= 1.0.0, you should use ``vnc_lite.html`` instead of
9167# ``vnc_auto.html``.
9168#
9169# Related options:
9170#
9171# * novncproxy_host
9172# * novncproxy_port
9173# (uri value)
9174#novncproxy_base_url = http://127.0.0.1:6080/vnc_auto.html
9175{%- if compute.vncproxy_url is defined %}
9176novncproxy_base_url = {{ compute.vncproxy_url }}/vnc_auto.html
9177{%- endif %}
9178
9179#
9180# IP address or hostname that the XVP VNC console proxy should bind to.
9181#
9182# The VNC proxy is an OpenStack component that enables compute service
9183# users to access their instances through VNC clients. Xen provides
9184# the Xenserver VNC Proxy, or XVP, as an alternative to the
9185# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
9186# XVP clients are Java-based.
9187#
9188# This option sets the private address to which the XVP VNC console proxy
9189# service should bind to.
9190#
9191# Related options:
9192#
9193# * xvpvncproxy_port
9194# * xvpvncproxy_base_url
9195# (host address value)
9196#xvpvncproxy_host = 0.0.0.0
9197
9198#
9199# Port that the XVP VNC console proxy should bind to.
9200#
9201# The VNC proxy is an OpenStack component that enables compute service
9202# users to access their instances through VNC clients. Xen provides
9203# the Xenserver VNC Proxy, or XVP, as an alternative to the
9204# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
9205# XVP clients are Java-based.
9206#
9207# This option sets the private port to which the XVP VNC console proxy
9208# service should bind to.
9209#
9210# Related options:
9211#
9212# * xvpvncproxy_host
9213# * xvpvncproxy_base_url
9214# (port value)
9215# Minimum value: 0
9216# Maximum value: 65535
9217#xvpvncproxy_port = 6081
9218
9219#
9220# Public URL address of XVP VNC console proxy.
9221#
9222# The VNC proxy is an OpenStack component that enables compute service
9223# users to access their instances through VNC clients. Xen provides
9224# the Xenserver VNC Proxy, or XVP, as an alternative to the
9225# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
9226# XVP clients are Java-based.
9227#
9228# This option sets the public base URL to which client systems will
9229# connect. XVP clients can use this address to connect to the XVP
9230# instance and, by extension, the VNC sessions.
9231#
9232# Related options:
9233#
9234# * xvpvncproxy_host
9235# * xvpvncproxy_port
9236# (uri value)
9237#xvpvncproxy_base_url = http://127.0.0.1:6081/console
9238
9239#
9240# IP address that the noVNC console proxy should bind to.
9241#
9242# The VNC proxy is an OpenStack component that enables compute service
9243# users to access their instances through VNC clients. noVNC provides
9244# VNC support through a websocket-based client.
9245#
9246# This option sets the private address to which the noVNC console proxy
9247# service should bind to.
9248#
9249# Related options:
9250#
9251# * novncproxy_port
9252# * novncproxy_base_url
9253# (string value)
9254#novncproxy_host = 0.0.0.0
9255
9256#
9257# Port that the noVNC console proxy should bind to.
9258#
9259# The VNC proxy is an OpenStack component that enables compute service
9260# users to access their instances through VNC clients. noVNC provides
9261# VNC support through a websocket-based client.
9262#
9263# This option sets the private port to which the noVNC console proxy
9264# service should bind to.
9265#
9266# Related options:
9267#
9268# * novncproxy_host
9269# * novncproxy_base_url
9270# (port value)
9271# Minimum value: 0
9272# Maximum value: 65535
9273#novncproxy_port = 6080
9274{%- if compute.bind is defined and compute.bind.vnc_port is defined %}
9275novncproxy_port = {{ compute.bind.vnc_port }}
9276{%- endif %}
9277
9278#
9279# The authentication schemes to use with the compute node.
9280#
9281# Control what RFB authentication schemes are permitted for connections between
9282# the proxy and the compute host. If multiple schemes are enabled, the first
9283# matching scheme will be used, thus the strongest schemes should be listed
9284# first.
9285#
9286# Possible values:
9287#
9288# * ``none``: allow connection without authentication
9289# * ``vencrypt``: use VeNCrypt authentication scheme
9290#
9291# Related options:
9292#
9293# * ``[vnc]vencrypt_client_key``, ``[vnc]vencrypt_client_cert``: must also be
9294# set
9295# (list value)
9296#auth_schemes = none
9297
9298# The path to the client key PEM file (for x509)
9299#
9300# The fully qualified path to a PEM file containing the private key which the
9301# VNC
9302# proxy server presents to the compute node during VNC authentication.
9303#
9304# Related options:
9305#
9306# * ``vnc.auth_schemes``: must include ``vencrypt``
9307# * ``vnc.vencrypt_client_cert``: must also be set
9308# (string value)
9309#vencrypt_client_key = <None>
9310
9311# The path to the client certificate PEM file (for x509)
9312#
9313# The fully qualified path to a PEM file containing the x509 certificate which
9314# the VNC proxy server presents to the compute node during VNC authentication.
9315#
9316# Related options:
9317#
9318# * ``vnc.auth_schemes``: must include ``vencrypt``
9319# * ``vnc.vencrypt_client_key``: must also be set
9320# (string value)
9321#vencrypt_client_cert = <None>
9322
9323# The path to the CA certificate PEM file
9324#
9325# The fully qualified path to a PEM file containing one or more x509
9326# certificates
9327# for the certificate authorities used by the compute node VNC server.
9328#
9329# Related options:
9330#
9331# * ``vnc.auth_schemes``: must include ``vencrypt``
9332# (string value)
9333#vencrypt_ca_certs = <None>
9334
9335
9336[workarounds]
9337#
9338# A collection of workarounds used to mitigate bugs or issues found in system
9339# tools (e.g. Libvirt or QEMU) or Nova itself under certain conditions. These
9340# should only be enabled in exceptional circumstances. All options are linked
9341# against bug IDs, where more information on the issue can be found.
9342
9343#
9344# From nova.conf
9345#
9346
9347#
9348# Use sudo instead of rootwrap.
9349#
9350# Allow fallback to sudo for performance reasons.
9351#
9352# For more information, refer to the bug report:
9353#
9354# https://bugs.launchpad.net/nova/+bug/1415106
9355#
9356# Possible values:
9357#
9358# * True: Use sudo instead of rootwrap
9359# * False: Use rootwrap as usual
9360#
9361# Interdependencies to other options:
9362#
9363# * Any options that affect 'rootwrap' will be ignored.
9364# (boolean value)
9365#disable_rootwrap = false
9366
9367#
9368# Disable live snapshots when using the libvirt driver.
9369#
9370# Live snapshots allow the snapshot of the disk to happen without an
9371# interruption to the guest, using coordination with a guest agent to
9372# quiesce the filesystem.
9373#
9374# When using libvirt 1.2.2 live snapshots fail intermittently under load
9375# (likely related to concurrent libvirt/qemu operations). This config
9376# option provides a mechanism to disable live snapshot, in favor of cold
9377# snapshot, while this is resolved. Cold snapshot causes an instance
9378# outage while the guest is going through the snapshotting process.
9379#
9380# For more information, refer to the bug report:
9381#
9382# https://bugs.launchpad.net/nova/+bug/1334398
9383#
9384# Possible values:
9385#
9386# * True: Live snapshot is disabled when using libvirt
9387# * False: Live snapshots are always used when snapshotting (as long as
9388# there is a new enough libvirt and the backend storage supports it)
9389# (boolean value)
9390disable_libvirt_livesnapshot = {{ compute.get('workaround', {}).get('disable_libvirt_livesnapshot', True)|lower }}
9391
9392#
9393# Enable handling of events emitted from compute drivers.
9394#
9395# Many compute drivers emit lifecycle events, which are events that occur when,
9396# for example, an instance is starting or stopping. If the instance is going
9397# through task state changes due to an API operation, like resize, the events
9398# are ignored.
9399#
9400# This is an advanced feature which allows the hypervisor to signal to the
9401# compute service that an unexpected state change has occurred in an instance
9402# and that the instance can be shutdown automatically. Unfortunately, this can
9403# race in some conditions, for example in reboot operations or when the compute
9404# service or when host is rebooted (planned or due to an outage). If such races
9405# are common, then it is advisable to disable this feature.
9406#
9407# Care should be taken when this feature is disabled and
9408# 'sync_power_state_interval' is set to a negative value. In this case, any
9409# instances that get out of sync between the hypervisor and the Nova database
9410# will have to be synchronized manually.
9411#
9412# For more information, refer to the bug report:
9413#
9414# https://bugs.launchpad.net/bugs/1444630
9415#
9416# Interdependencies to other options:
9417#
9418# * If ``sync_power_state_interval`` is negative and this feature is disabled,
9419# then instances that get out of sync between the hypervisor and the Nova
9420# database will have to be synchronized manually.
9421# (boolean value)
9422#handle_virt_lifecycle_events = true
9423
9424#
9425# Disable the server group policy check upcall in compute.
9426#
9427# In order to detect races with server group affinity policy, the compute
9428# service attempts to validate that the policy was not violated by the
9429# scheduler. It does this by making an upcall to the API database to list
9430# the instances in the server group for one that it is booting, which violates
9431# our api/cell isolation goals. Eventually this will be solved by proper
9432# affinity
9433# guarantees in the scheduler and placement service, but until then, this late
9434# check is needed to ensure proper affinity policy.
9435#
9436# Operators that desire api/cell isolation over this check should
9437# enable this flag, which will avoid making that upcall from compute.
9438#
9439# Related options:
9440#
9441# * [filter_scheduler]/track_instance_changes also relies on upcalls from the
9442# compute service to the scheduler service.
9443# (boolean value)
9444#disable_group_policy_check_upcall = false
9445
9446# DEPRECATED:
9447# Enable the consoleauth service to avoid resetting unexpired consoles.
9448#
9449# Console token authorizations have moved from the ``nova-consoleauth`` service
9450# to the database, so all new consoles will be supported by the database
9451# backend.
9452# With this, consoles that existed before database backend support will be
9453# reset.
9454# For most operators, this should be a minimal disruption as the default TTL of
9455# a
9456# console token is 10 minutes.
9457#
9458# Operators that have much longer token TTL configured or otherwise wish to
9459# avoid
9460# immediately resetting all existing consoles can enable this flag to continue
9461# using the ``nova-consoleauth`` service in addition to the database backend.
9462# Once all of the old ``nova-consoleauth`` supported console tokens have
9463# expired,
9464# this flag should be disabled. For example, if a deployment has configured a
9465# token TTL of one hour, the operator may disable the flag, one hour after
9466# deploying the new code during an upgrade.
9467#
9468# .. note:: Cells v1 was not converted to use the database backend for
9469# console token authorizations. Cells v1 console token authorizations will
9470# continue to be supported by the ``nova-consoleauth`` service and use of
9471# the ``[workarounds]/enable_consoleauth`` option does not apply to
9472# Cells v1 users.
9473#
9474# Related options:
9475#
9476# * ``[consoleauth]/token_ttl``
9477# (boolean value)
9478# This option is deprecated for removal since 18.0.0.
9479# Its value may be silently ignored in the future.
9480# Reason:
9481# This option has been added as deprecated originally because it is used
9482# for avoiding a upgrade issue and it will not be used in the future.
9483# See the help text for more details.
9484#enable_consoleauth = false
9485
9486#
9487# Starting in the 16.0.0 Pike release, ironic nodes can be scheduled using
9488# custom resource classes rather than the standard resource classes VCPU,
9489# MEMORY_MB and DISK_GB:
9490#
9491# https://docs.openstack.org/ironic/rocky/install/configure-nova-flavors.html
9492#
9493# However, existing ironic instances require a data migration which can be
9494# achieved either by restarting ``nova-compute`` services managing ironic nodes
9495# or running ``nova-manage db ironic_flavor_migration``. The completion of the
9496# data migration can be checked by running the ``nova-status upgrade check``
9497# command and checking the "Ironic Flavor Migration" result.
9498#
9499# Once all data migrations are complete, you can set this option to False to
9500# stop reporting VCPU, MEMORY_MB and DISK_GB resource class inventory to the
9501# Placement service so that scheduling will only rely on the custom resource
9502# class for each ironic node, as described in the document above.
9503#
9504# Note that this option does not apply starting in the 19.0.0 Stein release
9505# since the ironic compute driver no longer reports standard resource class
9506# inventory regardless of configuration.
9507#
9508# Alternatives to this workaround would be unsetting ``memory_mb`` and/or
9509# ``vcpus`` properties from ironic nodes, or using host aggregates to segregate
9510# VM from BM compute hosts and restrict flavors to those aggregates, but those
9511# alternatives might not be feasible at large scale.
9512#
9513# See related bug https://bugs.launchpad.net/nova/+bug/1796920 for more details.
9514# (boolean value)
9515#report_ironic_standard_resource_class_inventory = true
9516
9517
9518[wsgi]
9519#
9520# Options under this group are used to configure WSGI (Web Server Gateway
9521# Interface). WSGI is used to serve API requests.
9522
9523#
9524# From nova.conf
9525#
9526
9527#
9528# This option represents a file name for the paste.deploy config for nova-api.
9529#
9530# Possible values:
9531#
9532# * A string representing file name for the paste.deploy config.
9533# (string value)
9534#api_paste_config = api-paste.ini
9535
9536# DEPRECATED:
9537# It represents a python format string that is used as the template to generate
9538# log lines. The following values can be formatted into it: client_ip,
9539# date_time, request_line, status_code, body_length, wall_seconds.
9540#
9541# This option is used for building custom request loglines when running
9542# nova-api under eventlet. If used under uwsgi or apache, this option
9543# has no effect.
9544#
9545# Possible values:
9546#
9547# * '%(client_ip)s "%(request_line)s" status: %(status_code)s'
9548# 'len: %(body_length)s time: %(wall_seconds).7f' (default)
9549# * Any formatted string formed by specific values.
9550# (string value)
9551# This option is deprecated for removal since 16.0.0.
9552# Its value may be silently ignored in the future.
9553# Reason:
9554# This option only works when running nova-api under eventlet, and
9555# encodes very eventlet specific pieces of information. Starting in Pike
9556# the preferred model for running nova-api is under uwsgi or apache
9557# mod_wsgi.
9558#wsgi_log_format = %(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f
9559
9560#
9561# This option specifies the HTTP header used to determine the protocol scheme
9562# for the original request, even if it was removed by a SSL terminating proxy.
9563#
9564# Possible values:
9565#
9566# * None (default) - the request scheme is not influenced by any HTTP headers
9567# * Valid HTTP header, like ``HTTP_X_FORWARDED_PROTO``
9568#
9569# WARNING: Do not set this unless you know what you are doing.
9570#
9571# Make sure ALL of the following are true before setting this (assuming the
9572# values from the example above):
9573#
9574# * Your API is behind a proxy.
9575# * Your proxy strips the X-Forwarded-Proto header from all incoming requests.
9576# In other words, if end users include that header in their requests, the
9577# proxy
9578# will discard it.
9579# * Your proxy sets the X-Forwarded-Proto header and sends it to API, but only
9580# for requests that originally come in via HTTPS.
9581#
9582# If any of those are not true, you should keep this setting set to None.
9583# (string value)
9584#secure_proxy_ssl_header = <None>
9585
9586#
9587# This option allows setting path to the CA certificate file that should be used
9588# to verify connecting clients.
9589#
9590# Possible values:
9591#
9592# * String representing path to the CA certificate file.
9593#
9594# Related options:
9595#
9596# * enabled_ssl_apis
9597# (string value)
9598#ssl_ca_file = <None>
9599
9600#
9601# This option allows setting path to the SSL certificate of API server.
9602#
9603# Possible values:
9604#
9605# * String representing path to the SSL certificate.
9606#
9607# Related options:
9608#
9609# * enabled_ssl_apis
9610# (string value)
9611#ssl_cert_file = <None>
9612
9613#
9614# This option specifies the path to the file where SSL private key of API
9615# server is stored when SSL is in effect.
9616#
9617# Possible values:
9618#
9619# * String representing path to the SSL private key.
9620#
9621# Related options:
9622#
9623# * enabled_ssl_apis
9624# (string value)
9625#ssl_key_file = <None>
9626
9627#
9628# This option sets the value of TCP_KEEPIDLE in seconds for each server socket.
9629# It specifies the duration of time to keep connection active. TCP generates a
9630# KEEPALIVE transmission for an application that requests to keep connection
9631# active. Not supported on OS X.
9632#
9633# Related options:
9634#
9635# * keep_alive
9636# (integer value)
9637# Minimum value: 0
9638#tcp_keepidle = 600
9639
9640#
9641# This option specifies the size of the pool of greenthreads used by wsgi.
9642# It is possible to limit the number of concurrent connections using this
9643# option.
9644# (integer value)
9645# Minimum value: 0
9646# Deprecated group/name - [DEFAULT]/wsgi_default_pool_size
9647#default_pool_size = 1000
9648
9649#
9650# This option specifies the maximum line size of message headers to be accepted.
9651# max_header_line may need to be increased when using large tokens (typically
9652# those generated by the Keystone v3 API with big service catalogs).
9653#
9654# Since TCP is a stream based protocol, in order to reuse a connection, the HTTP
9655# has to have a way to indicate the end of the previous response and beginning
9656# of the next. Hence, in a keep_alive case, all messages must have a
9657# self-defined message length.
9658# (integer value)
9659# Minimum value: 0
9660#max_header_line = 16384
9661
9662#
9663# This option allows using the same TCP connection to send and receive multiple
9664# HTTP requests/responses, as opposed to opening a new one for every single
9665# request/response pair. HTTP keep-alive indicates HTTP connection reuse.
9666#
9667# Possible values:
9668#
9669# * True : reuse HTTP connection.
9670# * False : closes the client socket connection explicitly.
9671#
9672# Related options:
9673#
9674# * tcp_keepidle
9675# (boolean value)
9676# Deprecated group/name - [DEFAULT]/wsgi_keep_alive
9677#keep_alive = true
9678
9679#
9680# This option specifies the timeout for client connections' socket operations.
9681# If an incoming connection is idle for this number of seconds it will be
9682# closed. It indicates timeout on individual read/writes on the socket
9683# connection. To wait forever set to 0.
9684# (integer value)
9685# Minimum value: 0
9686#client_socket_timeout = 900
9687
9688
9689[xenserver]
9690#
9691# XenServer options are used when the compute_driver is set to use
9692# XenServer (compute_driver=xenapi.XenAPIDriver).
9693#
9694# Must specify connection_url, connection_password and ovs_integration_bridge to
9695# use compute_driver=xenapi.XenAPIDriver.
9696
9697#
9698# From nova.conf
9699#
9700
9701#
9702# Number of seconds to wait for agent's reply to a request.
9703#
9704# Nova configures/performs certain administrative actions on a server with the
9705# help of an agent that's installed on the server. The communication between
9706# Nova and the agent is achieved via sharing messages, called records, over
9707# xenstore, a shared storage across all the domains on a Xenserver host.
9708# Operations performed by the agent on behalf of nova are: 'version',
9709# 'key_init', 'password', 'resetnetwork', 'inject_file', and
9710# 'agentupdate'.
9711#
9712# To perform one of the above operations, the xapi 'agent' plugin writes the
9713# command and its associated parameters to a certain location known to the
9714# domain
9715# and awaits response. On being notified of the message, the agent performs
9716# appropriate actions on the server and writes the result back to xenstore. This
9717# result is then read by the xapi 'agent' plugin to determine the
9718# success/failure
9719# of the operation.
9720#
9721# This config option determines how long the xapi 'agent' plugin shall wait to
9722# read the response off of xenstore for a given request/command. If the agent on
9723# the instance fails to write the result in this time period, the operation is
9724# considered to have timed out.
9725#
9726# Related options:
9727#
9728# * ``agent_version_timeout``
9729# * ``agent_resetnetwork_timeout``
9730#
9731# (integer value)
9732# Minimum value: 0
9733#agent_timeout = 30
9734
9735#
9736# Number of seconds to wait for agent's reply to version request.
9737#
9738# This indicates the amount of time xapi 'agent' plugin waits for the agent to
9739# respond to the 'version' request specifically. The generic timeout for agent
9740# communication ``agent_timeout`` is ignored in this case.
9741#
9742# During the build process the 'version' request is used to determine if the
9743# agent is available/operational to perform other requests such as
9744# 'resetnetwork', 'password', 'key_init' and 'inject_file'. If the 'version'
9745# call
9746# fails, the other configuration is skipped. So, this configuration option can
9747# also be interpreted as time in which agent is expected to be fully
9748# operational.
9749# (integer value)
9750# Minimum value: 0
9751#agent_version_timeout = 300
9752
9753#
9754# Number of seconds to wait for agent's reply to resetnetwork
9755# request.
9756#
9757# This indicates the amount of time xapi 'agent' plugin waits for the agent to
9758# respond to the 'resetnetwork' request specifically. The generic timeout for
9759# agent communication ``agent_timeout`` is ignored in this case.
9760# (integer value)
9761# Minimum value: 0
9762#agent_resetnetwork_timeout = 60
9763
9764#
9765# Path to locate guest agent on the server.
9766#
9767# Specifies the path in which the XenAPI guest agent should be located. If the
9768# agent is present, network configuration is not injected into the image.
9769#
9770# Related options:
9771#
9772# For this option to have an effect:
9773# * ``flat_injected`` should be set to ``True``
9774# * ``compute_driver`` should be set to ``xenapi.XenAPIDriver``
9775#
9776# (string value)
9777#agent_path = usr/sbin/xe-update-networking
9778
9779#
9780# Disables the use of XenAPI agent.
9781#
9782# This configuration option suggests whether the use of agent should be enabled
9783# or not regardless of what image properties are present. Image properties have
9784# an effect only when this is set to ``True``. Read description of config option
9785# ``use_agent_default`` for more information.
9786#
9787# Related options:
9788#
9789# * ``use_agent_default``
9790#
9791# (boolean value)
9792#disable_agent = false
9793
9794#
9795# Whether or not to use the agent by default when its usage is enabled but not
9796# indicated by the image.
9797#
9798# The use of XenAPI agent can be disabled altogether using the configuration
9799# option ``disable_agent``. However, if it is not disabled, the use of an agent
9800# can still be controlled by the image in use through one of its properties,
9801# ``xenapi_use_agent``. If this property is either not present or specified
9802# incorrectly on the image, the use of agent is determined by this configuration
9803# option.
9804#
9805# Note that if this configuration is set to ``True`` when the agent is not
9806# present, the boot times will increase significantly.
9807#
9808# Related options:
9809#
9810# * ``disable_agent``
9811#
9812# (boolean value)
9813#use_agent_default = false
9814
9815# Timeout in seconds for XenAPI login. (integer value)
9816# Minimum value: 0
9817#login_timeout = 10
9818
9819#
9820# Maximum number of concurrent XenAPI connections.
9821#
9822# In nova, multiple XenAPI requests can happen at a time.
9823# Configuring this option will parallelize access to the XenAPI
9824# session, which allows you to make concurrent XenAPI connections.
9825# (integer value)
9826# Minimum value: 1
9827#connection_concurrent = 5
9828
9829#
9830# Cache glance images locally.
9831#
9832# The value for this option must be chosen from the choices listed
9833# here. Configuring a value other than these will default to 'all'.
9834#
9835# Note: There is nothing that deletes these images.
9836#
9837# Possible values:
9838#
9839# * `all`: will cache all images.
9840# * `some`: will only cache images that have the
9841# image_property `cache_in_nova=True`.
9842# * `none`: turns off caching entirely.
9843# (string value)
9844# Possible values:
9845# all - <No description provided>
9846# some - <No description provided>
9847# none - <No description provided>
9848#cache_images = all
9849
9850#
9851# Compression level for images.
9852#
9853# By setting this option we can configure the gzip compression level.
9854# This option sets GZIP environment variable before spawning tar -cz
9855# to force the compression level. It defaults to none, which means the
9856# GZIP environment variable is not set and the default (usually -6)
9857# is used.
9858#
9859# Possible values:
9860#
9861# * Range is 1-9, e.g., 9 for gzip -9, 9 being most
9862# compressed but most CPU intensive on dom0.
9863# * Any values out of this range will default to None.
9864# (integer value)
9865# Minimum value: 1
9866# Maximum value: 9
9867#image_compression_level = <None>
9868
9869# Default OS type used when uploading an image to glance (string value)
9870#default_os_type = linux
9871
9872# Time in secs to wait for a block device to be created (integer value)
9873# Minimum value: 1
9874#block_device_creation_timeout = 10
9875{%- if compute.block_device_creation_timeout is defined %}
9876block_device_creation_timeout = {{ compute.block_device_creation_timeout }}
9877{%- endif %}
9878
9879#
9880# Maximum size in bytes of kernel or ramdisk images.
9881#
9882# Specifying the maximum size of kernel or ramdisk will avoid copying
9883# large files to dom0 and fill up /boot/guest.
9884# (integer value)
9885#max_kernel_ramdisk_size = 16777216
9886
9887#
9888# Filter for finding the SR to be used to install guest instances on.
9889#
9890# Possible values:
9891#
9892# * To use the Local Storage in default XenServer/XCP installations
9893# set this flag to other-config:i18n-key=local-storage.
9894# * To select an SR with a different matching criteria, you could
9895# set it to other-config:my_favorite_sr=true.
9896# * To fall back on the Default SR, as displayed by XenCenter,
9897# set this flag to: default-sr:true.
9898# (string value)
9899#sr_matching_filter = default-sr:true
9900
9901#
9902# Whether to use sparse_copy for copying data on a resize down.
9903# (False will use standard dd). This speeds up resizes down
9904# considerably since large runs of zeros won't have to be rsynced.
9905# (boolean value)
9906#sparse_copy = true
9907
9908#
9909# Maximum number of retries to unplug VBD.
9910# If set to 0, should try once, no retries.
9911# (integer value)
9912# Minimum value: 0
9913#num_vbd_unplug_retries = 10
9914
9915#
9916# Name of network to use for booting iPXE ISOs.
9917#
9918# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
9919# This feature gives a means to roll your own image.
9920#
9921# By default this option is not set. Enable this option to
9922# boot an iPXE ISO.
9923#
9924# Related Options:
9925#
9926# * `ipxe_boot_menu_url`
9927# * `ipxe_mkisofs_cmd`
9928# (string value)
9929#ipxe_network_name = <None>
9930
9931#
9932# URL to the iPXE boot menu.
9933#
9934# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
9935# This feature gives a means to roll your own image.
9936#
9937# By default this option is not set. Enable this option to
9938# boot an iPXE ISO.
9939#
9940# Related Options:
9941#
9942# * `ipxe_network_name`
9943# * `ipxe_mkisofs_cmd`
9944# (string value)
9945#ipxe_boot_menu_url = <None>
9946
9947#
9948# Name and optionally path of the tool used for ISO image creation.
9949#
9950# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
9951# This feature gives a means to roll your own image.
9952#
9953# Note: By default `mkisofs` is not present in the Dom0, so the
9954# package can either be manually added to Dom0 or include the
9955# `mkisofs` binary in the image itself.
9956#
9957# Related Options:
9958#
9959# * `ipxe_network_name`
9960# * `ipxe_boot_menu_url`
9961# (string value)
9962#ipxe_mkisofs_cmd = mkisofs
9963
9964#
9965# URL for connection to XenServer/Xen Cloud Platform. A special value
9966# of unix://local can be used to connect to the local unix socket.
9967#
9968# Possible values:
9969#
9970# * Any string that represents a URL. The connection_url is
9971# generally the management network IP address of the XenServer.
9972# * This option must be set if you chose the XenServer driver.
9973# (string value)
9974#connection_url = <None>
9975
9976# Username for connection to XenServer/Xen Cloud Platform (string value)
9977#connection_username = root
9978
9979# Password for connection to XenServer/Xen Cloud Platform (string value)
9980#connection_password = <None>
9981
9982#
9983# The interval used for polling of coalescing vhds.
9984#
9985# This is the interval after which the task of coalesce VHD is
9986# performed, until it reaches the max attempts that is set by
9987# vhd_coalesce_max_attempts.
9988#
9989# Related options:
9990#
9991# * `vhd_coalesce_max_attempts`
9992# (floating point value)
9993# Minimum value: 0
9994#vhd_coalesce_poll_interval = 5.0
9995
9996#
9997# Ensure compute service is running on host XenAPI connects to.
9998# This option must be set to false if the 'independent_compute'
9999# option is set to true.
10000#
10001# Possible values:
10002#
10003# * Setting this option to true will make sure that compute service
10004# is running on the same host that is specified by connection_url.
10005# * Setting this option to false, doesn't perform the check.
10006#
10007# Related options:
10008#
10009# * `independent_compute`
10010# (boolean value)
10011#check_host = true
10012
10013#
10014# Max number of times to poll for VHD to coalesce.
10015#
10016# This option determines the maximum number of attempts that can be
10017# made for coalescing the VHD before giving up.
10018#
10019# Related options:
10020#
10021# * `vhd_coalesce_poll_interval`
10022# (integer value)
10023# Minimum value: 0
10024#vhd_coalesce_max_attempts = 20
10025
10026# Base path to the storage repository on the XenServer host. (string value)
10027#sr_base_path = /var/run/sr-mount
10028
10029#
10030# The iSCSI Target Host.
10031#
10032# This option represents the hostname or ip of the iSCSI Target.
10033# If the target host is not present in the connection information from
10034# the volume provider then the value from this option is taken.
10035#
10036# Possible values:
10037#
10038# * Any string that represents hostname/ip of Target.
10039# (host address value)
10040#target_host = <None>
10041
10042#
10043# The iSCSI Target Port.
10044#
10045# This option represents the port of the iSCSI Target. If the
10046# target port is not present in the connection information from the
10047# volume provider then the value from this option is taken.
10048# (port value)
10049# Minimum value: 0
10050# Maximum value: 65535
10051#target_port = 3260
10052
10053#
10054# Used to prevent attempts to attach VBDs locally, so Nova can
10055# be run in a VM on a different host.
10056#
10057# Related options:
10058#
10059# * ``CONF.flat_injected`` (Must be False)
10060# * ``CONF.xenserver.check_host`` (Must be False)
10061# * ``CONF.default_ephemeral_format`` (Must be unset or 'ext3')
10062# * Joining host aggregates (will error if attempted)
10063# * Swap disks for Windows VMs (will error if attempted)
10064# * Nova-based auto_configure_disk (will error if attempted)
10065# (boolean value)
10066#independent_compute = false
10067
10068#
10069# Wait time for instances to go to running state.
10070#
10071# Provide an integer value representing time in seconds to set the
10072# wait time for an instance to go to running state.
10073#
10074# When a request to create an instance is received by nova-api and
10075# communicated to nova-compute, the creation of the instance occurs
10076# through interaction with Xen via XenAPI in the compute node. Once
10077# the node on which the instance(s) are to be launched is decided by
10078# nova-scheduler and the launch is triggered, a certain amount of wait
10079# time is involved until the instance(s) can become available and
10080# 'running'. This wait time is defined by running_timeout. If the
10081# instances do not go to running state within this specified wait
10082# time, the launch expires and the instance(s) are set to 'error'
10083# state.
10084# (integer value)
10085# Minimum value: 0
10086#running_timeout = 60
10087
10088# DEPRECATED:
10089# Dom0 plugin driver used to handle image uploads.
10090#
10091# Provide a string value representing a plugin driver required to
10092# handle the image uploading to GlanceStore.
10093#
10094# Images, and snapshots from XenServer need to be uploaded to the data
10095# store for use. image_upload_handler takes in a value for the Dom0
10096# plugin driver. This driver is then called to upload images to the
10097# GlanceStore.
10098# (string value)
10099# This option is deprecated for removal since 18.0.0.
10100# Its value may be silently ignored in the future.
10101# Reason:
10102# Instead of setting the class path here, we will use short names
10103# to represent image handlers. The download and upload handlers
10104# must also be matching. So another new option "image_handler"
10105# will be used to set the short name for a specific image handler
10106# for both image download and upload.
10107#image_upload_handler =
10108
10109#
10110# The plugin used to handle image uploads and downloads.
10111#
10112# Provide a short name representing an image driver required to
10113# handle the image between compute host and glance.
10114#
10115# Description for the allowed values:
10116# * ``direct_vhd``: This plugin directly processes the VHD files in XenServer
10117# SR(Storage Repository). So this plugin only works when the host's SR
10118# type is file system based e.g. ext, nfs.
10119# * ``vdi_local_dev``: This plugin implements an image handler which attaches
10120# the instance's VDI as a local disk to the VM where the OpenStack Compute
10121# service runs in. It uploads the raw disk to glance when creating image;
10122# When booting an instance from a glance image, it downloads the image and
10123# streams it into the disk which is attached to the compute VM.
10124# * ``vdi_remote_stream``: This plugin implements an image handler which works
10125# as a proxy between glance and XenServer. The VHD streams to XenServer via
10126# a remote import API supplied by XAPI for image download; and for image
10127# upload, the VHD streams from XenServer via a remote export API supplied
10128# by XAPI. This plugin works for all SR types supported by XenServer.
10129# (string value)
10130# Possible values:
10131# direct_vhd - <No description provided>
10132# vdi_local_dev - <No description provided>
10133# vdi_remote_stream - <No description provided>
10134#image_handler = direct_vhd
10135
10136#
10137# Number of seconds to wait for SR to settle if the VDI
10138# does not exist when first introduced.
10139#
10140# Some SRs, particularly iSCSI connections are slow to see the VDIs
10141# right after they got introduced. Setting this option to a
10142# time interval will make the SR to wait for that time period
10143# before raising VDI not found exception.
10144# (integer value)
10145# Minimum value: 0
10146#introduce_vdi_retry_wait = 20
10147
10148#
10149# The name of the integration Bridge that is used with xenapi
10150# when connecting with Open vSwitch.
10151#
10152# Note: The value of this config option is dependent on the
10153# environment, therefore this configuration value must be set
10154# accordingly if you are using XenAPI.
10155#
10156# Possible values:
10157#
10158# * Any string that represents a bridge name.
10159# (string value)
10160#ovs_integration_bridge = <None>
10161
10162#
10163# When adding new host to a pool, this will append a --force flag to the
10164# command, forcing hosts to join a pool, even if they have different CPUs.
10165#
10166# Since XenServer version 5.6 it is possible to create a pool of hosts that have
10167# different CPU capabilities. To accommodate CPU differences, XenServer limited
10168# features it uses to determine CPU compatibility to only the ones that are
10169# exposed by CPU and support for CPU masking was added.
10170# Despite this effort to level differences between CPUs, it is still possible
10171# that adding new host will fail, thus option to force join was introduced.
10172# (boolean value)
10173#use_join_force = true
10174
10175#
10176# Publicly visible name for this console host.
10177#
10178# Possible values:
10179#
10180# * Current hostname (default) or any string representing hostname.
10181# (string value)
10182#console_public_hostname = <current_hostname>
10183
10184
10185[xvp]
10186#
10187# Configuration options for XVP.
10188#
10189# xvp (Xen VNC Proxy) is a proxy server providing password-protected VNC-based
10190# access to the consoles of virtual machines hosted on Citrix XenServer.
10191
10192#
10193# From nova.conf
10194#
10195
10196# XVP conf template (string value)
10197#console_xvp_conf_template = $pybasedir/nova/console/xvp.conf.template
10198
10199# Generated XVP conf file (string value)
10200#console_xvp_conf = /etc/xvp.conf
10201
10202# XVP master process pid file (string value)
10203#console_xvp_pid = /var/run/xvp.pid
10204
10205# XVP log file (string value)
10206#console_xvp_log = /var/log/xvp.log
10207
10208# Port for XVP to multiplex VNC connections on (port value)
10209# Minimum value: 0
10210# Maximum value: 65535
10211#console_xvp_multiplex_port = 5900
10212
10213
10214[zvm]
10215#
10216# zvm options allows cloud administrator to configure related
10217# z/VM hypervisor driver to be used within an OpenStack deployment.
10218#
10219# zVM options are used when the compute_driver is set to use
10220# zVM (compute_driver=zvm.ZVMDriver)
10221
10222#
10223# From nova.conf
10224#
10225
10226#
10227# URL to be used to communicate with z/VM Cloud Connector.
10228# (uri value)
10229#cloud_connector_url = http://zvm.example.org:8080/
10230
10231#
10232# CA certificate file to be verified in httpd server with TLS enabled
10233#
10234# A string, it must be a path to a CA bundle to use.
10235# (string value)
10236#ca_file = <None>
10237
10238#
10239# The path at which images will be stored (snapshot, deploy, etc).
10240#
10241# Images used for deploy and images captured via snapshot
10242# need to be stored on the local disk of the compute host.
10243# This configuration identifies the directory location.
10244#
10245# Possible values:
10246# A file system path on the host running the compute service.
10247# (string value)
10248#image_tmp_path = $state_path/images
10249
10250#
10251# Timeout (seconds) to wait for an instance to start.
10252#
10253# The z/VM driver relies on communication between the instance and cloud
10254# connector. After an instance is created, it must have enough time to wait
10255# for all the network info to be written into the user directory.
10256# The driver will keep rechecking network status to the instance with the
10257# timeout value. If setting up the network fails, it will notify the user that
10258# starting the instance failed and put the instance in ERROR state.
10259# The underlying z/VM guest will then be deleted.
10260#
10261# Possible Values:
10262# Any positive integer. Recommended to be at least 300 seconds (5 minutes),
10263# but it will vary depending on instance and system load.
10264# A value of 0 is used for debug. In this case the underlying z/VM guest
10265# will not be deleted when the instance is marked in ERROR state.
10266# (integer value)
10267#reachable_timeout = 300
Oleksandr Bryndzii6af347b2019-04-23 15:34:42 +030010268
10269{%- if compute.configmap is defined %}
10270{%- set _data = compute.configmap %}
10271{%- include "oslo_templates/files/configmap/configmap.conf" %}
10272{%- endif %}