{%- from "nova/map.jinja" import compute,compute_driver_mapping with context %}
[DEFAULT]

#
# From nova.conf
#
compute_manager=nova.compute.manager.ComputeManager
network_device_mtu=65000
use_neutron = True
security_group_api=neutron
image_service=nova.image.glance.GlanceImageService

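#
# Most options below are rendered from the ``compute`` dict imported from
# map.jinja above. As an illustrative sketch (the exact pillar layout is an
# assumption of this formula, typically ``nova:compute``), pillar data such
# as
#
#   nova:
#     compute:
#       reserved_host_memory_mb: 1024
#
# is exposed here as ``compute.reserved_host_memory_mb`` and substituted into
# the corresponding option.
#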
13#
14# Availability zone for internal services.
15#
16# This option determines the availability zone for the various
17# internal nova
18# services, such as 'nova-scheduler', 'nova-conductor', etc.
19#
20# Possible values:
21#
22# * Any string representing an existing availability zone name.
23# (string value)
24#internal_service_availability_zone = internal
25
26#
27# Default availability zone for compute services.
28#
29# This option determines the default availability zone for 'nova-
30# compute'
31# services, which will be used if the service(s) do not belong to
32# aggregates with
33# availability zone metadata.
34#
35# Possible values:
36#
37# * Any string representing an existing availability zone name.
38# (string value)
39#default_availability_zone = nova
40
41#
42# Default availability zone for instances.
43#
44# This option determines the default availability zone for instances,
45# which will
46# be used when a user does not specify one when creating an instance.
47# The
48# instance(s) will be bound to this availability zone for their
49# lifetime.
50#
51# Possible values:
52#
53# * Any string representing an existing availability zone name.
54# * None, which means that the instance can move from one availability
55# zone to
56# another during its lifetime if it is moved from one compute node
57# to another.
58# (string value)
59#default_schedule_zone = <None>
60
61# Length of generated instance admin passwords. (integer value)
62# Minimum value: 0
63#password_length = 12
64
65#
66# Time period to generate instance usages for. It is possible to
67# define optional
68# offset to given period by appending @ character followed by a number
69# defining
70# offset.
71#
72# Possible values:
73#
# * period, example: ``hour``, ``day``, ``month`` or ``year``
75# * period with offset, example: ``month@15`` will result in monthly
76# audits
77# starting on 15th day of month.
78# (string value)
79#instance_usage_audit_period = month
80{% if pillar.ceilometer is defined %}
81instance_usage_audit = True
82instance_usage_audit_period = hour
83{%- endif %}
84
85#
86# Start and use a daemon that can run the commands that need to be run
87# with
88# root privileges. This option is usually enabled on nodes that run
89# nova compute
90# processes.
91# (boolean value)
92#use_rootwrap_daemon = false
93
94#
95# Path to the rootwrap configuration file.
96#
97# Goal of the root wrapper is to allow a service-specific unprivileged
98# user to
99# run a number of actions as the root user in the safest manner
100# possible.
101# The configuration file used here must match the one defined in the
102# sudoers
103# entry.
104# (string value)
105rootwrap_config = /etc/nova/rootwrap.conf
106
107# Explicitly specify the temporary working directory. (string value)
108#tempdir = <None>
109
110# DEPRECATED:
111# Determine if monkey patching should be applied.
112#
113# Related options:
114#
115# * ``monkey_patch_modules``: This must have values set for this
116# option to
117# have any effect
118# (boolean value)
119# This option is deprecated for removal since 17.0.0.
120# Its value may be silently ignored in the future.
121# Reason:
122# Monkey patching nova is not tested, not supported, and is a barrier
123# for interoperability.
124#monkey_patch = false
125
126# DEPRECATED:
127# List of modules/decorators to monkey patch.
128#
129# This option allows you to patch a decorator for all functions in
130# specified
131# modules.
132#
133# Possible values:
134#
135# * nova.compute.api:nova.notifications.notify_decorator
136# * [...]
137#
138# Related options:
139#
140# * ``monkey_patch``: This must be set to ``True`` for this option to
141# have any effect
142# (list value)
143# This option is deprecated for removal since 17.0.0.
144# Its value may be silently ignored in the future.
145# Reason:
146# Monkey patching nova is not tested, not supported, and is a barrier
147# for interoperability.
148#monkey_patch_modules = nova.compute.api:nova.notifications.notify_decorator
149
150#
151# Defines which driver to use for controlling virtualization.
152#
153# Possible values:
154#
155# * ``libvirt.LibvirtDriver``
156# * ``xenapi.XenAPIDriver``
157# * ``fake.FakeDriver``
158# * ``ironic.IronicDriver``
159# * ``vmwareapi.VMwareVCDriver``
160# * ``hyperv.HyperVDriver``
161# * ``powervm.PowerVMDriver``
162# (string value)
163#compute_driver = <None>
164compute_driver = {{ compute.get('compute_driver', 'libvirt.LibvirtDriver') }}
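# Illustrative override (assumed pillar layout): setting
#
#   compute:
#     compute_driver: ironic.IronicDriver
#
# renders the line above as ``compute_driver = ironic.IronicDriver``; with no
# pillar value the libvirt default from the lookup is used.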
165
166#
167# Allow destination machine to match source for resize. Useful when
168# testing in single-host environments. By default it is not allowed
169# to resize to the same host. Setting this option to true will add
170# the same host to the destination options. Also set to true
171# if you allow the ServerGroupAffinityFilter and need to resize.
172# (boolean value)
173#allow_resize_to_same_host = false
174allow_resize_to_same_host = true
175
176#
177# Image properties that should not be inherited from the instance
178# when taking a snapshot.
179#
180# This option gives an opportunity to select which image-properties
181# should not be inherited by newly created snapshots.
182#
183# Possible values:
184#
185# * A comma-separated list whose item is an image property. Usually
186# only
187# the image properties that are only needed by base images can be
188# included
189# here, since the snapshots that are created from the base images
190# don't
191# need them.
192# * Default list: cache_in_nova, bittorrent,
193# img_signature_hash_method,
194# img_signature, img_signature_key_type,
195# img_signature_certificate_uuid
196#
197# (list value)
198#non_inheritable_image_properties = cache_in_nova,bittorrent,img_signature_hash_method,img_signature,img_signature_key_type,img_signature_certificate_uuid
199
200# DEPRECATED:
201# When creating multiple instances with a single request using the
202# os-multiple-create API extension, this template will be used to
203# build
204# the display name for each instance. The benefit is that the
205# instances
206# end up with different hostnames. Example display names when creating
# two VMs: name-1, name-2.
208#
209# Possible values:
210#
211# * Valid keys for the template are: name, uuid, count.
212# (string value)
213# This option is deprecated for removal since 15.0.0.
214# Its value may be silently ignored in the future.
215# Reason:
216# This config changes API behaviour. All changes in API behaviour
217# should be
218# discoverable.
219#multi_instance_display_name_template = %(name)s-%(count)d
220
221#
222# Maximum number of devices that will result in a local image being
223# created on the hypervisor node.
224#
225# A negative number means unlimited. Setting max_local_block_devices
226# to 0 means that any request that attempts to create a local disk
# will fail. This option is meant to limit the number of local disks
# (the root local disk created when --image is used, plus any other
# ephemeral and swap disks). 0 does not mean that images
230# will be automatically converted to volumes and boot instances from
231# volumes - it just means that all requests that attempt to create a
232# local disk will fail.
233#
234# Possible values:
235#
236# * 0: Creating a local disk is not allowed.
# * Negative number: Allows an unlimited number of local disks.
# * Positive number: Allows at most this many local disks.
#   (Default value is 3).
240# (integer value)
241#max_local_block_devices = 3
242
243#
244# A comma-separated list of monitors that can be used for getting
245# compute metrics. You can use the alias/name from the setuptools
246# entry points for nova.compute.monitors.* namespaces. If no
247# namespace is supplied, the "cpu." namespace is assumed for
248# backwards-compatibility.
249#
250# NOTE: Only one monitor per namespace (For example: cpu) can be
251# loaded at
252# a time.
253#
254# Possible values:
255#
256# * An empty list will disable the feature (Default).
257# * An example value that would enable both the CPU and NUMA memory
258# bandwidth monitors that use the virt driver variant:
259#
260# compute_monitors = cpu.virt_driver, numa_mem_bw.virt_driver
261# (list value)
262#compute_monitors =
263
264#
265# The default format an ephemeral_volume will be formatted with on
266# creation.
267#
268# Possible values:
269#
270# * ``ext2``
271# * ``ext3``
272# * ``ext4``
273# * ``xfs``
274# * ``ntfs`` (only for Windows guests)
275# (string value)
276#default_ephemeral_format = <None>
277
278#
279# Determine if instance should boot or fail on VIF plugging timeout.
280#
281# Nova sends a port update to Neutron after an instance has been
282# scheduled,
283# providing Neutron with the necessary information to finish setup of
284# the port.
285# Once completed, Neutron notifies Nova that it has finished setting
286# up the
287# port, at which point Nova resumes the boot of the instance since
288# network
289# connectivity is now supposed to be present. A timeout will occur if
290# the reply
291# is not received after a given interval.
292#
293# This option determines what Nova does when the VIF plugging timeout
294# event
295# happens. When enabled, the instance will error out. When disabled,
296# the
297# instance will continue to boot on the assumption that the port is
298# ready.
299#
300# Possible values:
301#
302# * True: Instances should fail after VIF plugging timeout
303# * False: Instances should continue booting after VIF plugging
304# timeout
305# (boolean value)
306vif_plugging_is_fatal = {{ compute.get('vif_plugging_is_fatal', 'true') }}
307
308#
309# Timeout for Neutron VIF plugging event message arrival.
310#
311# Number of seconds to wait for Neutron vif plugging events to
312# arrive before continuing or failing (see 'vif_plugging_is_fatal').
313#
314# Related options:
315#
316# * vif_plugging_is_fatal - If ``vif_plugging_timeout`` is set to zero
317# and
318# ``vif_plugging_is_fatal`` is False, events should not be expected
319# to
320# arrive at all.
321# (integer value)
322# Minimum value: 0
323vif_plugging_timeout = {{ compute.get('vif_plugging_timeout', '300') }}
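# With the template defaults above (no pillar overrides) the two options
# render as ``vif_plugging_is_fatal = true`` and ``vif_plugging_timeout =
# 300``. As noted in the related options, a sketch that disables waiting for
# Neutron events entirely would set ``vif_plugging_timeout: 0`` together with
# ``vif_plugging_is_fatal: false`` in pillar (assumed layout).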
324
325# Path to '/etc/network/interfaces' template.
326#
327# The path to a template file for the '/etc/network/interfaces'-style
328# file, which
329# will be populated by nova and subsequently used by cloudinit. This
330# provides a
331# method to configure network connectivity in environments without a
332# DHCP server.
333#
334# The template will be rendered using Jinja2 template engine, and
335# receive a
336# top-level key called ``interfaces``. This key will contain a list of
337# dictionaries, one for each interface.
338#
# Refer to the cloudinit documentation for more information:
340#
341# https://cloudinit.readthedocs.io/en/latest/topics/datasources.html
342#
343# Possible values:
344#
345# * A path to a Jinja2-formatted template for a Debian
346# '/etc/network/interfaces'
#   file. This applies even if using a non-Debian-derived guest.
348#
349# Related options:
350#
351# * ``flat_inject``: This must be set to ``True`` to ensure nova
352# embeds network
353# configuration information in the metadata provided through the
354# config drive.
355# (string value)
356#injected_network_template = $pybasedir/nova/virt/interfaces.template
357
358#
359# The image preallocation mode to use.
360#
361# Image preallocation allows storage for instance images to be
362# allocated up front
363# when the instance is initially provisioned. This ensures immediate
364# feedback is
365# given if enough space isn't available. In addition, it should
366# significantly
367# improve performance on writes to new blocks and may even improve I/O
368# performance to prewritten blocks due to reduced fragmentation.
369#
370# Possible values:
371#
372# * "none" => no storage provisioning is done up front
373# * "space" => storage is fully allocated at instance start
374# (string value)
375# Possible values:
376# none - <No description provided>
377# space - <No description provided>
378#preallocate_images = none
{%- if compute.preallocate_images is defined %}
preallocate_images = {{ compute.preallocate_images }}
{%- endif %}

#
384# Enable use of copy-on-write (cow) images.
385#
386# QEMU/KVM allow the use of qcow2 as backing files. By disabling this,
387# backing files will not be used.
388# (boolean value)
389#use_cow_images = true
390{%- if compute.image is defined and compute.image.use_cow is defined %}
391use_cow_images = {{ compute.image.use_cow }}
392{%- endif %}
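# Illustrative pillar fragment (assumed layout) that disables qcow2 backing
# files:
#
#   compute:
#     image:
#       use_cow: false
#
# which renders ``use_cow_images`` above with that value.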
393
394#
395# Force conversion of backing images to raw format.
396#
397# Possible values:
398#
399# * True: Backing image files will be converted to raw image format
400# * False: Backing image files will not be converted
401#
402# Related options:
403#
404# * ``compute_driver``: Only the libvirt driver uses this option.
405# (boolean value)
406#force_raw_images = true
407force_raw_images={{ compute.get('image', {}).get('force_raw', True)|lower }}
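# Sketch of the lookup above: with no ``image.force_raw`` key in pillar the
# default ``True`` is used and the ``|lower`` filter emits
# ``force_raw_images=true``; an assumed pillar value of
# ``image: {force_raw: false}`` emits ``force_raw_images=false``.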
408
409#
410# Name of the mkfs commands for ephemeral device.
411#
412# The format is <os_type>=<mkfs command>
413# (multi valued)
414#virt_mkfs =
415
416#
417# Enable resizing of filesystems via a block device.
418#
419# If enabled, attempt to resize the filesystem by accessing the image
420# over a
421# block device. This is done by the host and may not be necessary if
422# the image
423# contains a recent version of cloud-init. Possible mechanisms require
424# the nbd
425# driver (for qcow and raw), or loop (for raw).
426# (boolean value)
427#resize_fs_using_block_device = false
428
429# Amount of time, in seconds, to wait for NBD device start up.
430# (integer value)
431# Minimum value: 0
432#timeout_nbd = 10
433
434#
435# Location of cached images.
436#
437# This is NOT the full path - just a folder name relative to
438# '$instances_path'.
439# For per-compute-host cached images, set to '_base_$my_ip'
440# (string value)
441#image_cache_subdirectory_name = _base
442
443# Should unused base images be removed? (boolean value)
444#remove_unused_base_images = true
445
446#
447# Unused unresized base images younger than this will not be removed.
448# (integer value)
449remove_unused_original_minimum_age_seconds = 86400
450
451#
452# Generic property to specify the pointer type.
453#
454# Input devices allow interaction with a graphical framebuffer. For
455# example to provide a graphic tablet for absolute cursor movement.
456#
457# If set, the 'hw_pointer_model' image property takes precedence over
458# this configuration option.
459#
460# Possible values:
461#
462# * None: Uses default behavior provided by drivers (mouse on PS2 for
463# libvirt x86)
464# * ps2mouse: Uses relative movement. Mouse connected by PS2
# * usbtablet: Uses absolute movement. Tablet connected by USB
466#
467# Related options:
468#
469# * usbtablet must be configured with VNC enabled or SPICE enabled and
470# SPICE
471# agent disabled. When used with libvirt the instance mode should be
472# configured as HVM.
473# (string value)
474# Possible values:
475# <None> - <No description provided>
476# ps2mouse - <No description provided>
477# usbtablet - <No description provided>
478#pointer_model = usbtablet
479
480#
481# Defines which physical CPUs (pCPUs) can be used by instance
482# virtual CPUs (vCPUs).
483#
484# Possible values:
485#
486# * A comma-separated list of physical CPU numbers that virtual CPUs
487# can be
488# allocated to by default. Each element should be either a single
489# CPU number,
490# a range of CPU numbers, or a caret followed by a CPU number to be
491# excluded from a previous range. For example:
492#
493# vcpu_pin_set = "4-12,^8,15"
494# (string value)
495#vcpu_pin_set = <None>
496{%- if compute.vcpu_pin_set is defined %}
497vcpu_pin_set={{ compute.vcpu_pin_set }}
498{%- endif %}
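# Illustrative pillar value (assumed layout) matching the syntax shown in the
# comment above:
#
#   compute:
#     vcpu_pin_set: "4-12,^8,15"
#
# which emits ``vcpu_pin_set=4-12,^8,15`` here.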
499
500#
# Number of huge/large memory pages to reserve per NUMA host cell.
502#
503# Possible values:
504#
505# * A list of valid key=value which reflect NUMA node ID, page size
506# (Default unit is KiB) and number of pages to be reserved.
507#
508# reserved_huge_pages = node:0,size:2048,count:64
509# reserved_huge_pages = node:1,size:1GB,count:1
510#
# In this example, 64 pages of 2 MiB are reserved on NUMA node 0 and
# one page of 1 GiB on NUMA node 1.
513# (dict value)
514#reserved_huge_pages = <None>
515
516#
517# Amount of disk resources in MB to make them always available to
518# host. The
519# disk usage gets reported back to the scheduler from nova-compute
520# running
521# on the compute nodes. To prevent the disk resources from being
522# considered
523# as available, this option can be used to reserve disk space for that
524# host.
525#
526# Possible values:
527#
528# * Any positive integer representing amount of disk in MB to reserve
529# for the host.
530# (integer value)
531# Minimum value: 0
532#reserved_host_disk_mb = 0
533
534#
535# Amount of memory in MB to reserve for the host so that it is always
536# available
537# to host processes. The host resources usage is reported back to the
538# scheduler
539# continuously from nova-compute running on the compute node. To
540# prevent the host
541# memory from being considered as available, this option is used to
542# reserve
543# memory for the host.
544#
545# Possible values:
546#
547# * Any positive integer representing amount of memory in MB to
548# reserve
549# for the host.
550# (integer value)
551# Minimum value: 0
552#reserved_host_memory_mb = 512
553reserved_host_memory_mb = {{ compute.get('reserved_host_memory_mb', '512') }}
554
555#
556# Number of physical CPUs to reserve for the host. The host resources
557# usage is
558# reported back to the scheduler continuously from nova-compute
559# running on the
560# compute node. To prevent the host CPU from being considered as
561# available,
562# this option is used to reserve random pCPU(s) for the host.
563#
564# Possible values:
565#
566# * Any positive integer representing number of physical CPUs to
567# reserve
568# for the host.
569# (integer value)
570# Minimum value: 0
571#reserved_host_cpus = 0
572
573#
574# This option helps you specify virtual CPU to physical CPU allocation
575# ratio.
576#
577# From Ocata (15.0.0) this is used to influence the hosts selected by
578# the Placement API. Note that when Placement is used, the CoreFilter
579# is redundant, because the Placement API will have already filtered
580# out hosts that would have failed the CoreFilter.
581#
582# This configuration specifies ratio for CoreFilter which can be set
583# per compute node. For AggregateCoreFilter, it will fall back to this
584# configuration value if no per-aggregate setting is found.
585#
586# NOTE: This can be set per-compute, or if set to 0.0, the value
587# set on the scheduler node(s) or compute node(s) will be used
588# and defaulted to 16.0.
589#
590# NOTE: As of the 16.0.0 Pike release, this configuration option is
591# ignored
592# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
593#
594# Possible values:
595#
596# * Any valid positive integer or float value
597# (floating point value)
598# Minimum value: 0
599#cpu_allocation_ratio = 0.0
600{%- if compute.cpu_allocation_ratio is defined %}
601cpu_allocation_ratio = {{ compute.cpu_allocation_ratio }}
602{%- else %}
603#cpu_allocation_ratio=0.0
604{%- endif %}
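# Worked example (assumed pillar layout): ``cpu_allocation_ratio: 8.0`` makes
# a node with 16 physical cores report 16 * 8.0 = 128 schedulable vCPUs to
# the scheduler/placement service.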
605
606#
607# This option helps you specify virtual RAM to physical RAM
608# allocation ratio.
609#
610# From Ocata (15.0.0) this is used to influence the hosts selected by
611# the Placement API. Note that when Placement is used, the RamFilter
612# is redundant, because the Placement API will have already filtered
613# out hosts that would have failed the RamFilter.
614#
615# This configuration specifies ratio for RamFilter which can be set
616# per compute node. For AggregateRamFilter, it will fall back to this
# configuration value if no per-aggregate setting is found.
618#
619# NOTE: This can be set per-compute, or if set to 0.0, the value
620# set on the scheduler node(s) or compute node(s) will be used and
621# defaulted to 1.5.
622#
623# NOTE: As of the 16.0.0 Pike release, this configuration option is
624# ignored
625# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
626#
627# Possible values:
628#
629# * Any valid positive integer or float value
630# (floating point value)
631# Minimum value: 0
632#ram_allocation_ratio = 0.0
633{%- if compute.ram_allocation_ratio is defined %}
634ram_allocation_ratio = {{ compute.ram_allocation_ratio }}
635{%- else %}
636#ram_allocation_ratio=0.0
637{%- endif %}
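# Worked example (assumed pillar layout): ``ram_allocation_ratio: 1.5`` makes
# a node with 256 GiB of RAM report roughly 256 * 1.5 = 384 GiB as available
# for instances, less the ``reserved_host_memory_mb`` set earlier in this
# file.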
638
639#
640# This option helps you specify virtual disk to physical disk
641# allocation ratio.
642#
643# From Ocata (15.0.0) this is used to influence the hosts selected by
644# the Placement API. Note that when Placement is used, the DiskFilter
645# is redundant, because the Placement API will have already filtered
646# out hosts that would have failed the DiskFilter.
647#
648# A ratio greater than 1.0 will result in over-subscription of the
649# available physical disk, which can be useful for more
650# efficiently packing instances created with images that do not
651# use the entire virtual disk, such as sparse or compressed
652# images. It can be set to a value between 0.0 and 1.0 in order
653# to preserve a percentage of the disk for uses other than
654# instances.
655#
656# NOTE: This can be set per-compute, or if set to 0.0, the value
657# set on the scheduler node(s) or compute node(s) will be used and
658# defaulted to 1.0.
659#
660# NOTE: As of the 16.0.0 Pike release, this configuration option is
661# ignored
662# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
663#
664# Possible values:
665#
666# * Any valid positive integer or float value
667# (floating point value)
668# Minimum value: 0
669#disk_allocation_ratio = 0.0
670
671#
672# Console proxy host to be used to connect to instances on this host.
673# It is the
674# publicly visible name for the console host.
675#
676# Possible values:
677#
678# * Current hostname (default) or any string representing hostname.
679# (string value)
680#console_host = <current_hostname>
681
682#
683# Name of the network to be used to set access IPs for instances. If
684# there are
685# multiple IPs to choose from, an arbitrary one will be chosen.
686#
687# Possible values:
688#
689# * None (default)
690# * Any string representing network name.
691# (string value)
692#default_access_ip_network_name = <None>
693
694#
695# Whether to batch up the application of IPTables rules during a host
696# restart
697# and apply all at the end of the init phase.
698# (boolean value)
699#defer_iptables_apply = false
700
701#
702# Specifies where instances are stored on the hypervisor's disk.
703# It can point to locally attached storage or a directory on NFS.
704#
705# Possible values:
706#
707# * $state_path/instances where state_path is a config option that
708# specifies
709# the top-level directory for maintaining nova's state. (default) or
710# Any string representing directory path.
711# (string value)
712instances_path = {{ compute.instances_path }}
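# Note: unlike most lookups in this template, ``compute.instances_path`` has
# no default, so the key must be present in pillar (assumed layout), for
# example:
#
#   compute:
#     instances_path: /var/lib/nova/instances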
713
714#
715# This option enables periodic compute.instance.exists notifications.
716# Each
717# compute node must be configured to generate system usage data. These
718# notifications are consumed by OpenStack Telemetry service.
719# (boolean value)
720#instance_usage_audit = false
721
722#
723# Maximum number of 1 second retries in live_migration. It specifies
724# number
# of retries to iptables when it complains. It happens when a user
# continuously sends live-migration requests to the same host, leading
# to concurrent requests to iptables.
730#
731# Possible values:
732#
733# * Any positive integer representing retry count.
734# (integer value)
735# Minimum value: 0
736#live_migration_retry_count = 30
737
738#
739# This option specifies whether to start guests that were running
740# before the
741# host rebooted. It ensures that all of the instances on a Nova
742# compute node
743# resume their state each time the compute node boots or restarts.
744# (boolean value)
745resume_guests_state_on_host_boot = {{ compute.get('resume_guests_state_on_host_boot', True) }}
746
747#
748# Number of times to retry network allocation. It is required to
749# attempt network
750# allocation retries if the virtual interface plug fails.
751#
752# Possible values:
753#
754# * Any positive integer representing retry count.
755# (integer value)
756# Minimum value: 0
757#network_allocate_retries = 0
758
759#
760# Limits the maximum number of instance builds to run concurrently by
761# nova-compute. Compute service can attempt to build an infinite
762# number of
763# instances, if asked to do so. This limit is enforced to avoid
764# building
765# unlimited instance concurrently on a compute node. This value can be
766# set
767# per compute node.
768#
769# Possible Values:
770#
771# * 0 : treated as unlimited.
772# * Any positive integer representing maximum concurrent builds.
773# (integer value)
774# Minimum value: 0
775#max_concurrent_builds = 10
776
777#
778# Maximum number of live migrations to run concurrently. This limit is
779# enforced
780# to avoid outbound live migrations overwhelming the host/network and
781# causing
782# failures. It is not recommended that you change this unless you are
783# very sure
784# that doing so is safe and stable in your environment.
785#
786# Possible values:
787#
788# * 0 : treated as unlimited.
789# * Negative value defaults to 0.
790# * Any positive integer representing maximum number of live
791# migrations
792# to run concurrently.
793# (integer value)
794#max_concurrent_live_migrations = 1
795{%- if compute.max_concurrent_live_migrations is defined %}
796max_concurrent_live_migrations = {{ compute.max_concurrent_live_migrations }}
797{%- endif %}
798
799#
800# Number of times to retry block device allocation on failures.
801# Starting with
802# Liberty, Cinder can use image volume cache. This may help with block
803# device
804# allocation performance. Look at the cinder
805# image_volume_cache_enabled
806# configuration option.
807#
808# Possible values:
809#
810# * 60 (default)
811# * If value is 0, then one attempt is made.
812# * Any negative value is treated as 0.
813# * For any value > 0, total attempts are (value + 1)
814# (integer value)
815block_device_allocate_retries = {{ compute.get('block_device_allocate_retries', '600') }}
816
817#
818# Number of greenthreads available for use to sync power states.
819#
820# This option can be used to reduce the number of concurrent requests
821# made to the hypervisor or system with real instance power states
822# for performance reasons, for example, with Ironic.
823#
824# Possible values:
825#
826# * Any positive integer representing greenthreads count.
827# (integer value)
828#sync_power_state_pool_size = 1000
829
830#
831# Number of seconds to wait between runs of the image cache manager.
832#
833# Possible values:
834# * 0: run at the default rate.
835# * -1: disable
836# * Any other value
837# (integer value)
838# Minimum value: -1
839image_cache_manager_interval = 0
840
841#
842# Interval to pull network bandwidth usage info.
843#
844# Not supported on all hypervisors. If a hypervisor doesn't support
845# bandwidth
846# usage, it will not get the info in the usage events.
847#
848# Possible values:
849#
850# * 0: Will run at the default periodic interval.
851# * Any value < 0: Disables the option.
852# * Any positive integer in seconds.
853# (integer value)
854#bandwidth_poll_interval = 600
855
856#
857# Interval to sync power states between the database and the
858# hypervisor.
859#
860# The interval that Nova checks the actual virtual machine power state
861# and the power state that Nova has in its database. If a user powers
862# down their VM, Nova updates the API to report the VM has been
863# powered down. Should something turn on the VM unexpectedly,
864# Nova will turn the VM back off to keep the system in the expected
865# state.
866#
867# Possible values:
868#
869# * 0: Will run at the default periodic interval.
870# * Any value < 0: Disables the option.
871# * Any positive integer in seconds.
872#
873# Related options:
874#
875# * If ``handle_virt_lifecycle_events`` in workarounds_group is
876# false and this option is negative, then instances that get out
877# of sync between the hypervisor and the Nova database will have
878# to be synchronized manually.
879# (integer value)
880#sync_power_state_interval = 600
881
882#
883# Interval between instance network information cache updates.
884#
885# Number of seconds after which each compute node runs the task of
886# querying Neutron for all of its instances networking information,
887# then updates the Nova db with that information. Nova will never
888# update it's cache if this option is set to 0. If we don't update the
889# cache, the metadata service and nova-api endpoints will be proxying
890# incorrect network data about the instance. So, it is not recommended
891# to set this option to 0.
892#
893# Possible values:
894#
895# * Any positive integer in seconds.
896# * Any value <=0 will disable the sync. This is not recommended.
897# (integer value)
898#heal_instance_info_cache_interval = 60
899heal_instance_info_cache_interval = {{ compute.heal_instance_info_cache_interval }}
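# As with ``instances_path``, this lookup has no default, so
# ``heal_instance_info_cache_interval`` must be defined in pillar; an
# illustrative value is ``heal_instance_info_cache_interval: 60``, which
# keeps the upstream default shown above.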
900
901#
902# Interval for reclaiming deleted instances.
903#
904# A value greater than 0 will enable SOFT_DELETE of instances.
905# This option decides whether the server to be deleted will be put
906# into
907# the SOFT_DELETED state. If this value is greater than 0, the deleted
908# server will not be deleted immediately, instead it will be put into
909# a queue until it's too old (deleted time greater than the value of
910# reclaim_instance_interval). The server can be recovered from the
911# delete queue by using the restore action. If the deleted server
912# remains
913# longer than the value of reclaim_instance_interval, it will be
914# deleted by a periodic task in the compute service automatically.
915#
916# Note that this option is read from both the API and compute nodes,
917# and
918# must be set globally otherwise servers could be put into a soft
919# deleted
920# state in the API and never actually reclaimed (deleted) on the
921# compute
922# node.
923#
924# Possible values:
925#
926# * Any positive integer(in seconds) greater than 0 will enable
927# this option.
928# * Any value <=0 will disable the option.
929# (integer value)
930#reclaim_instance_interval = 0
931
932#
933# Interval for gathering volume usages.
934#
935# This option updates the volume usage cache for every
936# volume_usage_poll_interval number of seconds.
937#
938# Possible values:
939#
940# * Any positive integer(in seconds) greater than 0 will enable
941# this option.
942# * Any value <=0 will disable the option.
943# (integer value)
944#volume_usage_poll_interval = 0
945
946#
947# Interval for polling shelved instances to offload.
948#
949# The periodic task runs for every shelved_poll_interval number
950# of seconds and checks if there are any shelved instances. If it
951# finds a shelved instance, based on the 'shelved_offload_time' config
952# value it offloads the shelved instances. Check
953# 'shelved_offload_time'
954# config option description for details.
955#
956# Possible values:
957#
958# * Any value <= 0: Disables the option.
959# * Any positive integer in seconds.
960#
961# Related options:
962#
963# * ``shelved_offload_time``
964# (integer value)
965#shelved_poll_interval = 3600
966
967#
968# Time before a shelved instance is eligible for removal from a host.
969#
970# By default this option is set to 0 and the shelved instance will be
971# removed from the hypervisor immediately after shelve operation.
972# Otherwise, the instance will be kept for the value of
973# shelved_offload_time(in seconds) so that during the time period the
974# unshelve action will be faster, then the periodic task will remove
975# the instance from hypervisor after shelved_offload_time passes.
976#
977# Possible values:
978#
979# * 0: Instance will be immediately offloaded after being
980# shelved.
981# * Any value < 0: An instance will never offload.
982# * Any positive integer in seconds: The instance will exist for
983# the specified number of seconds before being offloaded.
984# (integer value)
985#shelved_offload_time = 0
986
987#
988# Interval for retrying failed instance file deletes.
989#
990# This option depends on 'maximum_instance_delete_attempts'.
991# This option specifies how often to retry deletes whereas
992# 'maximum_instance_delete_attempts' specifies the maximum number
993# of retry attempts that can be made.
994#
995# Possible values:
996#
997# * 0: Will run at the default periodic interval.
998# * Any value < 0: Disables the option.
999# * Any positive integer in seconds.
1000#
1001# Related options:
1002#
1003# * ``maximum_instance_delete_attempts`` from instance_cleaning_opts
1004# group.
1005# (integer value)
1006#instance_delete_interval = 300
1007
1008#
1009# Interval (in seconds) between block device allocation retries on
1010# failures.
1011#
1012# This option allows the user to specify the time interval between
1013# consecutive retries. 'block_device_allocate_retries' option
1014# specifies
1015# the maximum number of retries.
1016#
1017# Possible values:
1018#
1019# * 0: Disables the option.
1020# * Any positive integer in seconds enables the option.
1021#
1022# Related options:
1023#
1024# * ``block_device_allocate_retries`` in compute_manager_opts group.
1025# (integer value)
1026# Minimum value: 0
1027block_device_allocate_retries_interval = {{ compute.get('block_device_allocate_retries_interval', '10') }}
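# Worked example with the template defaults above: 600 retries at a 10 second
# interval means nova-compute waits roughly 600 * 10 = 6000 seconds (about
# 100 minutes) for block device allocation before failing the build.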
1028
1029#
1030# Interval between sending the scheduler a list of current instance
1031# UUIDs to
1032# verify that its view of instances is in sync with nova.
1033#
1034# If the CONF option 'scheduler_tracks_instance_changes' is
1035# False, the sync calls will not be made. So, changing this option
1036# will
1037# have no effect.
1038#
1039# If the out of sync situations are not very common, this interval
1040# can be increased to lower the number of RPC messages being sent.
1041# Likewise, if sync issues turn out to be a problem, the interval
1042# can be lowered to check more frequently.
1043#
1044# Possible values:
1045#
1046# * 0: Will run at the default periodic interval.
1047# * Any value < 0: Disables the option.
1048# * Any positive integer in seconds.
1049#
1050# Related options:
1051#
1052# * This option has no impact if ``scheduler_tracks_instance_changes``
1053# is set to False.
1054# (integer value)
1055#scheduler_instance_sync_interval = 120
1056
1057#
1058# Interval for updating compute resources.
1059#
1060# This option specifies how often the update_available_resources
1061# periodic task should run. A number less than 0 means to disable the
1062# task completely. Leaving this at the default of 0 will cause this to
1063# run at the default periodic interval. Setting it to any positive
1064# value will cause it to run at approximately that number of seconds.
1065#
1066# Possible values:
1067#
1068# * 0: Will run at the default periodic interval.
1069# * Any value < 0: Disables the option.
1070# * Any positive integer in seconds.
1071# (integer value)
1072#update_resources_interval = 0
1073
1074#
1075# Time interval after which an instance is hard rebooted
1076# automatically.
1077#
1078# When doing a soft reboot, it is possible that a guest kernel is
1079# completely hung in a way that causes the soft reboot task
1080# to not ever finish. Setting this option to a time period in seconds
1081# will automatically hard reboot an instance if it has been stuck
1082# in a rebooting state longer than N seconds.
1083#
1084# Possible values:
1085#
1086# * 0: Disables the option (default).
1087# * Any positive integer in seconds: Enables the option.
1088# (integer value)
1089# Minimum value: 0
1090#reboot_timeout = 0
1091
1092#
1093# Maximum time in seconds that an instance can take to build.
1094#
1095# If this timer expires, instance status will be changed to ERROR.
1096# Enabling this option will make sure an instance will not be stuck
1097# in BUILD state for a longer period.
1098#
1099# Possible values:
1100#
1101# * 0: Disables the option (default)
1102# * Any positive integer in seconds: Enables the option.
1103# (integer value)
1104# Minimum value: 0
1105#instance_build_timeout = 0
1106
1107#
1108# Interval to wait before un-rescuing an instance stuck in RESCUE.
1109#
1110# Possible values:
1111#
1112# * 0: Disables the option (default)
1113# * Any positive integer in seconds: Enables the option.
1114# (integer value)
1115# Minimum value: 0
1116#rescue_timeout = 0
1117
1118#
1119# Automatically confirm resizes after N seconds.
1120#
1121# Resize functionality will save the existing server before resizing.
1122# After the resize completes, user is requested to confirm the resize.
1123# The user has the opportunity to either confirm or revert all
1124# changes. Confirm resize removes the original server and changes
1125# server status from resized to active. Setting this option to a time
1126# period (in seconds) will automatically confirm the resize if the
1127# server is in resized state longer than that time.
1128#
1129# Possible values:
1130#
1131# * 0: Disables the option (default)
1132# * Any positive integer in seconds: Enables the option.
1133# (integer value)
1134# Minimum value: 0
1135#resize_confirm_window = 0
1136
1137#
# Total time to wait in seconds for an instance to perform a clean
# shutdown.
1140#
1141# It determines the overall period (in seconds) a VM is allowed to
# perform a clean shutdown. While performing stop, rescue, shelve and
# rebuild operations, configuring this option gives the VM a chance
1144# to perform a controlled shutdown before the instance is powered off.
1145# The default timeout is 60 seconds.
1146#
1147# The timeout value can be overridden on a per image basis by means
1148# of os_shutdown_timeout that is an image metadata setting allowing
1149# different types of operating systems to specify how much time they
1150# need to shut down cleanly.
1151#
1152# Possible values:
1153#
1154# * Any positive integer in seconds (default value is 60).
1155# (integer value)
1156# Minimum value: 1
1157#shutdown_timeout = 60
1158
1159#
1160# The compute service periodically checks for instances that have been
1161# deleted in the database but remain running on the compute node. The
1162# above option enables action to be taken when such instances are
1163# identified.
1164#
1165# Possible values:
1166#
# * reap: Powers down the instances and deletes them (default)
1168# * log: Logs warning message about deletion of the resource
1169# * shutdown: Powers down instances and marks them as non-
1170# bootable which can be later used for debugging/analysis
1171# * noop: Takes no action
1172#
1173# Related options:
1174#
1175# * running_deleted_instance_poll_interval
1176# * running_deleted_instance_timeout
1177# (string value)
1178# Possible values:
1179# noop - <No description provided>
1180# log - <No description provided>
1181# shutdown - <No description provided>
1182# reap - <No description provided>
1183#running_deleted_instance_action = reap
1184
1185#
1186# Time interval in seconds to wait between runs for the clean up
1187# action.
# If set to 0, the above check will be disabled. If
# "running_deleted_instance_action" is set to "log" or "reap", a value
# greater than 0 must be set.
1192#
1193# Possible values:
1194#
1195# * Any positive integer in seconds enables the option.
1196# * 0: Disables the option.
1197# * 1800: Default value.
1198#
1199# Related options:
1200#
1201# * running_deleted_instance_action
1202# (integer value)
1203#running_deleted_instance_poll_interval = 1800
1204
1205#
1206# Time interval in seconds to wait for the instances that have
1207# been marked as deleted in database to be eligible for cleanup.
1208#
1209# Possible values:
1210#
1211# * Any positive integer in seconds(default is 0).
1212#
1213# Related options:
1214#
1215# * "running_deleted_instance_action"
1216# (integer value)
1217#running_deleted_instance_timeout = 0
1218
1219#
1220# The number of times to attempt to reap an instance's files.
1221#
1222# This option specifies the maximum number of retry attempts
1223# that can be made.
1224#
1225# Possible values:
1226#
1227# * Any positive integer defines how many attempts are made.
1228# * Any value <=0 means no delete attempts occur, but you should use
1229# ``instance_delete_interval`` to disable the delete attempts.
1230#
1231# Related options:
1232# * ``instance_delete_interval`` in interval_opts group can be used to
1233# disable
1234# this option.
1235# (integer value)
1236#maximum_instance_delete_attempts = 5
1237
1238#
1239# Sets the scope of the check for unique instance names.
1240#
1241# The default doesn't check for unique names. If a scope for the name
1242# check is
1243# set, a launch of a new instance or an update of an existing instance
1244# with a
1245# duplicate name will result in an ''InstanceExists'' error. The
1246# uniqueness is
1247# case-insensitive. Setting this option can increase the usability for
1248# end
1249# users as they don't have to distinguish among instances with the
1250# same name
1251# by their IDs.
1252#
1253# Possible values:
1254#
1255# * '': An empty value means that no uniqueness check is done and
1256# duplicate
1257# names are possible.
1258# * "project": The instance name check is done only for instances
1259# within the
1260# same project.
1261# * "global": The instance name check is done for all instances
1262# regardless of
1263# the project.
1264# (string value)
1265# Possible values:
1266# '' - <No description provided>
1267# project - <No description provided>
1268# global - <No description provided>
1269#osapi_compute_unique_server_name_scope =
1270
1271#
1272# Enable new nova-compute services on this host automatically.
1273#
1274# When a new nova-compute service starts up, it gets
1275# registered in the database as an enabled service. Sometimes it can
1276# be useful
# to register new compute services in disabled state and then enable
# them at a
1279# later point in time. This option only sets this behavior for nova-
1280# compute
1281# services, it does not auto-disable other services like nova-
1282# conductor,
1283# nova-scheduler, nova-consoleauth, or nova-osapi_compute.
1284#
1285# Possible values:
1286#
1287# * ``True``: Each new compute service is enabled as soon as it
1288# registers itself.
1289# * ``False``: Compute services must be enabled via an os-services
1290# REST API call
1291# or with the CLI with ``nova service-enable <hostname> <binary>``,
1292# otherwise
1293# they are not ready to use.
1294# (boolean value)
1295#enable_new_services = true
1296
1297#
1298# Template string to be used to generate instance names.
1299#
1300# This template controls the creation of the database name of an
1301# instance. This
1302# is *not* the display name you enter when creating an instance (via
1303# Horizon
1304# or CLI). For a new deployment it is advisable to change the default
1305# value
1306# (which uses the database autoincrement) to another value which makes
1307# use
1308# of the attributes of an instance, like ``instance-%(uuid)s``. If you
1309# already have instances in your deployment when you change this, your
1310# deployment will break.
1311#
1312# Possible values:
1313#
1314# * A string which either uses the instance database ID (like the
1315# default)
1316# * A string with a list of named database columns, for example
1317# ``%(id)d``
1318# or ``%(uuid)s`` or ``%(hostname)s``.
1319#
1320# Related options:
1321#
1322# * not to be confused with: ``multi_instance_display_name_template``
1323# (string value)
1324#instance_name_template = instance-%08x
1325
1326#
1327# Number of times to retry live-migration before failing.
1328#
1329# Possible values:
1330#
1331# * If == -1, try until out of hosts (default)
1332# * If == 0, only try once, no retries
1333# * Integer greater than 0
1334# (integer value)
1335# Minimum value: -1
1336#migrate_max_retries = -1
1337
1338#
1339# Configuration drive format
1340#
1341# Configuration drive format that will contain metadata attached to
1342# the
1343# instance when it boots.
1344#
1345# Possible values:
1346#
1347# * iso9660: A file system image standard that is widely supported
1348# across
1349# operating systems. NOTE: Mind the libvirt bug
1350# (https://bugs.launchpad.net/nova/+bug/1246201) - If your
1351# hypervisor
1352# driver is libvirt, and you want live migrate to work without
1353# shared storage,
1354# then use VFAT.
1355# * vfat: For legacy reasons, you can configure the configuration
1356# drive to
1357# use VFAT format instead of ISO 9660.
1358#
1359# Related options:
1360#
1361# * This option is meaningful when one of the following alternatives
1362# occur:
1363# 1. force_config_drive option set to 'true'
1364# 2. the REST API call to create the instance contains an enable
1365# flag for
1366# config drive option
1367# 3. the image used to create the instance requires a config drive,
1368# this is defined by img_config_drive property for that image.
1369# * A compute node running Hyper-V hypervisor can be configured to
1370# attach
1371# configuration drive as a CD drive. To attach the configuration
1372# drive as a CD
1373# drive, set config_drive_cdrom option at hyperv section, to true.
1374# (string value)
1375# Possible values:
1376# iso9660 - <No description provided>
1377# vfat - <No description provided>
1378#config_drive_format = iso9660
1379config_drive_format={{ compute.get('config_drive_format', compute.get('config_drive', {}).get('format', 'vfat')) }}
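# The lookup above accepts either of two assumed pillar layouts, with the
# flat key taking precedence:
#
#   config_drive_format: iso9660
#
# or
#
#   config_drive:
#     format: iso9660
#
# and falls back to ``vfat`` when neither key is set.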
1380
1381#
1382# Force injection to take place on a config drive
1383#
1384# When this option is set to true configuration drive functionality
1385# will be
1386# forced enabled by default, otherwise user can still enable
1387# configuration
1388# drives via the REST API or image metadata properties.
1389#
1390# Possible values:
1391#
1392# * True: Force to use of configuration drive regardless the user's
1393# input in the
1394# REST API call.
1395# * False: Do not force use of configuration drive. Config drives can
1396# still be
1397# enabled via the REST API or image metadata properties.
1398#
1399# Related options:
1400#
1401# * Use the 'mkisofs_cmd' flag to set the path where you install the
1402# genisoimage program. If genisoimage is in same path as the
1403# nova-compute service, you do not need to set this flag.
1404# * To use configuration drive with Hyper-V, you must set the
1405# 'mkisofs_cmd' value to the full path to an mkisofs.exe
1406# installation.
1407# Additionally, you must set the qemu_img_cmd value in the hyperv
1408# configuration section to the full path to an qemu-img command
1409# installation.
1410# (boolean value)
1411#force_config_drive = false
1412force_config_drive={{ compute.get('config_drive', {}).get('forced', True)|lower }}
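# Sketch of the lookup above: an assumed pillar fragment
# ``config_drive: {forced: false}`` renders ``force_config_drive=false``;
# with no key present the template default is ``true``.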
1413
1414#
1415# Name or path of the tool used for ISO image creation
1416#
1417# Use the mkisofs_cmd flag to set the path where you install the
1418# genisoimage
1419# program. If genisoimage is on the system path, you do not need to
1420# change
1421# the default value.
1422#
1423# To use configuration drive with Hyper-V, you must set the
1424# mkisofs_cmd value
1425# to the full path to an mkisofs.exe installation. Additionally, you
1426# must set
1427# the qemu_img_cmd value in the hyperv configuration section to the
1428# full path
1429# to an qemu-img command installation.
1430#
1431# Possible values:
1432#
1433# * Name of the ISO image creator program, in case it is in the same
1434# directory
1435# as the nova-compute service
1436# * Path to ISO image creator program
1437#
1438# Related options:
1439#
1440# * This option is meaningful when config drives are enabled.
1441# * To use configuration drive with Hyper-V, you must set the
1442# qemu_img_cmd
1443# value in the hyperv configuration section to the full path to an
1444# qemu-img
1445# command installation.
1446# (string value)
1447#mkisofs_cmd = genisoimage
1448
1449# DEPRECATED: The driver to use for database access (string value)
1450# This option is deprecated for removal since 13.0.0.
1451# Its value may be silently ignored in the future.
1452#db_driver = nova.db
1453
1454# DEPRECATED:
1455# Default flavor to use for the EC2 API only.
1456# The Nova API does not support a default flavor.
1457# (string value)
1458# This option is deprecated for removal since 14.0.0.
1459# Its value may be silently ignored in the future.
1460# Reason: The EC2 API is deprecated.
1461#default_flavor = m1.small
1462
1463#
1464# The IP address which the host is using to connect to the management
1465# network.
1466#
1467# Possible values:
1468#
1469# * String with valid IP address. Default is IPv4 address of this
1470# host.
1471#
1472# Related options:
1473#
1474# * metadata_host
1475# * my_block_storage_ip
1476# * routing_source_ip
1477# * vpn_ip
1478# (string value)
1479#my_ip = <host_ipv4>
1480{%- if compute.my_ip is defined %}
1481my_ip={{ compute.my_ip }}
1482{%- endif %}
1483
1484#
1485# The IP address which is used to connect to the block storage
1486# network.
1487#
1488# Possible values:
1489#
1490# * String with valid IP address. Default is IP address of this host.
1491#
1492# Related options:
1493#
1494# * my_ip - if my_block_storage_ip is not set, then my_ip value is
1495# used.
1496# (string value)
1497#my_block_storage_ip = $my_ip
1498
1499#
1500# Hostname, FQDN or IP address of this host.
1501#
1502# Used as:
1503#
1504# * the oslo.messaging queue name for nova-compute worker
1505# * we use this value for the binding_host sent to neutron. This means
1506# if you use
1507# a neutron agent, it should have the same value for host.
1508# * cinder host attachment information
1509#
1510# Must be valid within AMQP key.
1511#
1512# Possible values:
1513#
1514# * String with hostname, FQDN or IP address. Default is hostname of
1515# this host.
1516# (string value)
1517#host = <current_hostname>
1518{%- if compute.host is defined %}
1519host={{ compute.host }}
1520{%- endif %}
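# Illustrative pillar fragment (assumed layout) pinning both the ``my_ip``
# and ``host`` options rendered above:
#
#   compute:
#     my_ip: 10.0.0.12
#     host: cmp01.example.local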
1521
1522# DEPRECATED:
1523# This option is a list of full paths to one or more configuration
1524# files for
1525# dhcpbridge. In most cases the default path of '/etc/nova/nova-
1526# dhcpbridge.conf'
1527# should be sufficient, but if you have special needs for configuring
1528# dhcpbridge,
1529# you can change or add to this list.
1530#
1531# Possible values
1532#
1533# * A list of strings, where each string is the full path to a
1534# dhcpbridge
1535# configuration file.
1536# (multi valued)
1537# This option is deprecated for removal since 16.0.0.
1538# Its value may be silently ignored in the future.
1539# Reason:
1540# nova-network is deprecated, as are any related configuration
1541# options.
1542#dhcpbridge_flagfile = /etc/nova/nova.conf
1543
1544# DEPRECATED:
1545# The location where the network configuration files will be kept. The
1546# default is
1547# the 'networks' directory off of the location where nova's Python
1548# module is
1549# installed.
1550#
1551# Possible values
1552#
1553# * A string containing the full path to the desired configuration
1554# directory
1555# (string value)
1556# This option is deprecated for removal since 16.0.0.
1557# Its value may be silently ignored in the future.
1558# Reason:
1559# nova-network is deprecated, as are any related configuration
1560# options.
1561#networks_path = $state_path/networks
1562
1563# DEPRECATED:
1564# This is the name of the network interface for public IP addresses.
1565# The default
1566# is 'eth0'.
1567#
1568# Possible values:
1569#
1570# * Any string representing a network interface name
1571# (string value)
1572# This option is deprecated for removal since 16.0.0.
1573# Its value may be silently ignored in the future.
1574# Reason:
1575# nova-network is deprecated, as are any related configuration
1576# options.
1577#public_interface = eth0
1578
1579# DEPRECATED:
1580# The location of the binary nova-dhcpbridge. By default it is the
1581# binary named
1582# 'nova-dhcpbridge' that is installed with all the other nova
1583# binaries.
1584#
1585# Possible values:
1586#
1587# * Any string representing the full path to the binary for dhcpbridge
1588# (string value)
1589# This option is deprecated for removal since 16.0.0.
1590# Its value may be silently ignored in the future.
1591# Reason:
1592# nova-network is deprecated, as are any related configuration
1593# options.
1594#dhcpbridge = $bindir/nova-dhcpbridge
1595
1596# DEPRECATED:
1597# The public IP address of the network host.
1598#
1599# This is used when creating an SNAT rule.
1600#
1601# Possible values:
1602#
1603# * Any valid IP address
1604#
1605# Related options:
1606#
1607# * ``force_snat_range``
1608# (string value)
1609# This option is deprecated for removal since 16.0.0.
1610# Its value may be silently ignored in the future.
1611# Reason:
1612# nova-network is deprecated, as are any related configuration
1613# options.
1614#routing_source_ip = $my_ip
1615
1616# DEPRECATED:
1617# The lifetime of a DHCP lease, in seconds. The default is 86400 (one
1618# day).
1619#
1620# Possible values:
1621#
1622# * Any positive integer value.
1623# (integer value)
1624# Minimum value: 1
1625# This option is deprecated for removal since 16.0.0.
1626# Its value may be silently ignored in the future.
1627# Reason:
1628# nova-network is deprecated, as are any related configuration
1629# options.
1630#dhcp_lease_time = 86400
1631
1632# DEPRECATED:
1633# Despite the singular form of the name of this option, it is actually
1634# a list of
1635# zero or more server addresses that dnsmasq will use for DNS
1636# nameservers. If
1637# this is not empty, dnsmasq will not read /etc/resolv.conf, but will
1638# only use
1639# the servers specified in this option. If the option
1640# use_network_dns_servers is
1641# True, the dns1 and dns2 servers from the network will be appended to
1642# this list,
1643# and will be used as DNS servers, too.
1644#
1645# Possible values:
1646#
1647# * A list of strings, where each string is either an IP address or a
1648# FQDN.
1649#
1650# Related options:
1651#
1652# * ``use_network_dns_servers``
1653# (multi valued)
1654# This option is deprecated for removal since 16.0.0.
1655# Its value may be silently ignored in the future.
1656# Reason:
1657# nova-network is deprecated, as are any related configuration
1658# options.
1659#dns_server =
1660
1661# DEPRECATED:
1662# When this option is set to True, the dns1 and dns2 servers for the
1663# network
1664# specified by the user on boot will be used for DNS, as well as any
1665# specified in
1666# the `dns_server` option.
1667#
1668# Related options:
1669#
1670# * ``dns_server``
1671# (boolean value)
1672# This option is deprecated for removal since 16.0.0.
1673# Its value may be silently ignored in the future.
1674# Reason:
1675# nova-network is deprecated, as are any related configuration
1676# options.
1677#use_network_dns_servers = false
1678
1679# DEPRECATED:
1680# This option is a list of zero or more IP address ranges in your
1681# network's DMZ
1682# that should be accepted.
1683#
1684# Possible values:
1685#
1686# * A list of strings, each of which should be a valid CIDR.
1687# (list value)
1688# This option is deprecated for removal since 16.0.0.
1689# Its value may be silently ignored in the future.
1690# Reason:
1691# nova-network is deprecated, as are any related configuration
1692# options.
1693#dmz_cidr =
1694
1695# DEPRECATED:
1696# This is a list of zero or more IP ranges that traffic from the
1697# `routing_source_ip` will be SNATted to. If the list is empty, then
1698# no SNAT
1699# rules are created.
1700#
1701# Possible values:
1702#
1703# * A list of strings, each of which should be a valid CIDR.
1704#
1705# Related options:
1706#
1707# * ``routing_source_ip``
1708# (multi valued)
1709# This option is deprecated for removal since 16.0.0.
1710# Its value may be silently ignored in the future.
1711# Reason:
1712# nova-network is deprecated, as are any related configuration
1713# options.
1714#force_snat_range =
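# Illustrative (commented-out) pairing with the related option above,
# relevant only to deprecated nova-network deployments; the addresses are
# placeholders:
#routing_source_ip = 192.0.2.10
#force_snat_range = 198.51.100.0/24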
1715
1716# DEPRECATED:
1717# The path to the custom dnsmasq configuration file, if any.
1718#
1719# Possible values:
1720#
1721# * The full path to the configuration file, or an empty string if
1722# there is no
1723# custom dnsmasq configuration file.
1724# (string value)
1725# This option is deprecated for removal since 16.0.0.
1726# Its value may be silently ignored in the future.
1727# Reason:
1728# nova-network is deprecated, as are any related configuration
1729# options.
1730#dnsmasq_config_file =
1731
1732# DEPRECATED:
1733# This is the class used as the ethernet device driver for linuxnet
1734# bridge
1735# operations. The default value should be all you need for most cases,
1736# but if you
1737# wish to use a customized class, set this option to the full dot-
1738# separated
1739# import path for that class.
1740#
1741# Possible values:
1742#
1743# * Any string representing a dot-separated class path that Nova can
1744# import.
1745# (string value)
1746# This option is deprecated for removal since 16.0.0.
1747# Its value may be silently ignored in the future.
1748# Reason:
1749# nova-network is deprecated, as are any related configuration
1750# options.
1751#linuxnet_interface_driver = nova.network.linux_net.LinuxBridgeInterfaceDriver
1752
1753# DEPRECATED:
1754# The name of the Open vSwitch bridge that is used with linuxnet when
1755# connecting
1756# with Open vSwitch.
1757#
1758# Possible values:
1759#
1760# * Any string representing a valid bridge name.
1761# (string value)
1762# This option is deprecated for removal since 16.0.0.
1763# Its value may be silently ignored in the future.
1764# Reason:
1765# nova-network is deprecated, as are any related configuration
1766# options.
1767#linuxnet_ovs_integration_bridge = br-int
1768
1769#
1770# When True, when a device starts up, and upon binding floating IP
1771# addresses, arp
1772# messages will be sent to ensure that the arp caches on the compute
1773# hosts are
1774# up-to-date.
1775#
1776# Related options:
1777#
1778# * ``send_arp_for_ha_count``
1779# (boolean value)
1780#send_arp_for_ha = false
1781
1782#
1783# When arp messages are configured to be sent, they will be sent with
1784# the count
1785# set to the value of this option. Of course, if this is set to zero,
1786# no arp
1787# messages will be sent.
1788#
1789# Possible values:
1790#
1791# * Any integer greater than or equal to 0
1792#
1793# Related options:
1794#
1795# * ``send_arp_for_ha``
1796# (integer value)
1797#send_arp_for_ha_count = 3
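# Illustrative (commented-out) pairing of the two options above: enable the
# ARP announcements and keep the default count of three.
#send_arp_for_ha = true
#send_arp_for_ha_count = 3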
1798
1799# DEPRECATED:
1800# When set to True, only the first NIC of a VM will get its default
1801# gateway from
1802# the DHCP server.
1803# (boolean value)
1804# This option is deprecated for removal since 16.0.0.
1805# Its value may be silently ignored in the future.
1806# Reason:
1807# nova-network is deprecated, as are any related configuration
1808# options.
1809#use_single_default_gateway = false
1810
1811# DEPRECATED:
1812# One or more interfaces that bridges can forward traffic to. If any
1813# of the items
1814# in this list is the special keyword 'all', then all traffic will be
1815# forwarded.
1816#
1817# Possible values:
1818#
1819# * A list of zero or more interface names, or the word 'all'.
1820# (multi valued)
1821# This option is deprecated for removal since 16.0.0.
1822# Its value may be silently ignored in the future.
1823# Reason:
1824# nova-network is deprecated, as are any related configuration
1825# options.
1826#forward_bridge_interface = all
1827
1828#
1829# This option determines the IP address for the network metadata API
1830# server.
1831#
1832# This is really the client side of the metadata host equation that
1833# allows
1834# nova-network to find the metadata server when using default
1835# multi-host
1836# networking.
1837#
1838# Possible values:
1839#
1840# * Any valid IP address. The default is the address of the Nova API
1841# server.
1842#
1843# Related options:
1844#
1845# * ``metadata_port``
1846# (string value)
1847#metadata_host = $my_ip
1848
1849# DEPRECATED:
1850# This option determines the port used for the metadata API server.
1851#
1852# Related options:
1853#
1854# * ``metadata_host``
1855# (port value)
1856# Minimum value: 0
1857# Maximum value: 65535
1858# This option is deprecated for removal since 16.0.0.
1859# Its value may be silently ignored in the future.
1860# Reason:
1861# nova-network is deprecated, as are any related configuration
1862# options.
1863#metadata_port = 8775
1864
1865# DEPRECATED:
1866# This expression, if defined, will select any matching iptables rules
1867# and place
1868# them at the top when applying metadata changes to the rules.
1869#
1870# Possible values:
1871#
1872# * Any string representing a valid regular expression, or an empty
1873# string
1874#
1875# Related options:
1876#
1877# * ``iptables_bottom_regex``
1878# (string value)
1879# This option is deprecated for removal since 16.0.0.
1880# Its value may be silently ignored in the future.
1881# Reason:
1882# nova-network is deprecated, as are any related configuration
1883# options.
1884#iptables_top_regex =
1885
1886# DEPRECATED:
1887# This expression, if defined, will select any matching iptables rules
1888# and place
1889# them at the bottom when applying metadata changes to the rules.
1890#
1891# Possible values:
1892#
1893# * Any string representing a valid regular expression, or an empty
1894# string
1895#
1896# Related options:
1897#
1898# * iptables_top_regex
1899# (string value)
1900# This option is deprecated for removal since 16.0.0.
1901# Its value may be silently ignored in the future.
1902# Reason:
1903# nova-network is deprecated, as are any related configuration
1904# options.
1905#iptables_bottom_regex =
1906
1907# DEPRECATED:
1908# By default, packets that do not pass the firewall are DROPped. In
1909# many cases,
1910# though, an operator may find it more useful to change this from DROP
1911# to REJECT,
1912# so that the user issuing those packets may have a better idea as to
1913# what's
1914# going on, or LOGDROP in order to record the blocked traffic before
1915# DROPping.
1916#
1917# Possible values:
1918#
1919# * A string representing an iptables chain. The default is DROP.
1920# (string value)
1921# This option is deprecated for removal since 16.0.0.
1922# Its value may be silently ignored in the future.
1923# Reason:
1924# nova-network is deprecated, as are any related configuration
1925# options.
1926#iptables_drop_action = DROP
1927
1928# DEPRECATED:
1929# This option represents the period of time, in seconds, that the
1930# ovs_vsctl calls
1931# will wait for a response from the database before timing out. A
1932# setting of 0
1933# means that the utility should wait forever for a response.
1934#
1935# Possible values:
1936#
1937# * Any positive integer if a limited timeout is desired, or zero if
1938# the calls
1939# should wait forever for a response.
1940# (integer value)
1941# Minimum value: 0
1942# This option is deprecated for removal since 16.0.0.
1943# Its value may be silently ignored in the future.
1944# Reason:
1945# nova-network is deprecated, as are any related configuration
1946# options.
1947#ovs_vsctl_timeout = 120
1948
1949# DEPRECATED:
1950# This option is used mainly in testing to avoid calls to the
1951# underlying network
1952# utilities.
1953# (boolean value)
1954# This option is deprecated for removal since 16.0.0.
1955# Its value may be silently ignored in the future.
1956# Reason:
1957# nova-network is deprecated, as are any related configuration
1958# options.
1959#fake_network = false
1960
1961# DEPRECATED:
1962# This option determines the number of times to retry ebtables
1963# commands before
1964# giving up. The minimum number of retries is 1.
1965#
1966# Possible values:
1967#
1968# * Any positive integer
1969#
1970# Related options:
1971#
1972# * ``ebtables_retry_interval``
1973# (integer value)
1974# Minimum value: 1
1975# This option is deprecated for removal since 16.0.0.
1976# Its value may be silently ignored in the future.
1977# Reason:
1978# nova-network is deprecated, as are any related configuration
1979# options.
1980#ebtables_exec_attempts = 3
1981
1982# DEPRECATED:
1983# This option determines the time, in seconds, that the system will
1984# sleep in
1985# between ebtables retries. Note that each successive retry waits a
1986# multiple of
1987# this value, so for example, if this is set to the default of 1.0
1988# seconds, and
1989# ebtables_exec_attempts is 4, after the first failure, the system
1990# will sleep for
1991# 1 * 1.0 seconds, after the second failure it will sleep 2 * 1.0
1992# seconds, and
1993# after the third failure it will sleep 3 * 1.0 seconds.
1994#
1995# Possible values:
1996#
1997# * Any non-negative float or integer. Setting this to zero will
1998# result in no
1999# waiting between attempts.
2000#
2001# Related options:
2002#
2003# * ebtables_exec_attempts
2004# (floating point value)
2005# This option is deprecated for removal since 16.0.0.
2006# Its value may be silently ignored in the future.
2007# Reason:
2008# nova-network is deprecated, as are any related configuration
2009# options.
2010#ebtables_retry_interval = 1.0
2011
2012# DEPRECATED:
2013# Enable neutron as the backend for networking.
2014#
2015# Determine whether to use Neutron or Nova Network as the back end.
2016# Set to true
2017# to use neutron.
2018# (boolean value)
2019# This option is deprecated for removal since 15.0.0.
2020# Its value may be silently ignored in the future.
2021# Reason:
2022# nova-network is deprecated, as are any related configuration
2023# options.
2024#use_neutron = true
2025
2026#
2027# This option determines whether the network setup information is
2028# injected into
2029# the VM before it is booted. While it was originally designed to be
2030# used only
2031# by nova-network, it is also used by the vmware and xenapi virt
2032# drivers to
2033# control whether network information is injected into a VM. The
2034# libvirt virt
2035# driver also uses it when config_drive is used to configure the network,
2036# to control
2037# whether network information is injected into a VM.
2038# (boolean value)
2039#flat_injected = false
2040
2041# DEPRECATED:
2042# This option determines the bridge used for simple network interfaces
2043# when no
2044# bridge is specified in the VM creation request.
2045#
2046# Please note that this option is only used when using nova-network
2047# instead of
2048# Neutron in your deployment.
2049#
2050# Possible values:
2051#
2052# * Any string representing a valid network bridge, such as 'br100'
2053#
2054# Related options:
2055#
2056# * ``use_neutron``
2057# (string value)
2058# This option is deprecated for removal since 15.0.0.
2059# Its value may be silently ignored in the future.
2060# Reason:
2061# nova-network is deprecated, as are any related configuration
2062# options.
2063#flat_network_bridge = <None>
2064
2065# DEPRECATED:
2066# This is the address of the DNS server for a simple network. If this
2067# option is
2068# not specified, the default of '8.8.4.4' is used.
2069#
2070# Please note that this option is only used when using nova-network
2071# instead of
2072# Neutron in your deployment.
2073#
2074# Possible values:
2075#
2076# * Any valid IP address.
2077#
2078# Related options:
2079#
2080# * ``use_neutron``
2081# (string value)
2082# This option is deprecated for removal since 15.0.0.
2083# Its value may be silently ignored in the future.
2084# Reason:
2085# nova-network is deprecated, as are any related configuration
2086# options.
2087#flat_network_dns = 8.8.4.4
2088
2089# DEPRECATED:
2090# This option is the name of the virtual interface of the VM on which
2091# the bridge
2092# will be built. While it was originally designed to be used only by
2093# nova-network, it is also used by libvirt for the bridge interface
2094# name.
2095#
2096# Possible values:
2097#
2098# * Any valid virtual interface name, such as 'eth0'
2099# (string value)
2100# This option is deprecated for removal since 15.0.0.
2101# Its value may be silently ignored in the future.
2102# Reason:
2103# nova-network is deprecated, as are any related configuration
2104# options.
2105#flat_interface = <None>
2106
2107# DEPRECATED:
2108# This is the VLAN number used for private networks. Note that when
2109# creating
2110# the networks, if the specified number has already been assigned,
2111# nova-network
2112# will increment this number until it finds an available VLAN.
2113#
2114# Please note that this option is only used when using nova-network
2115# instead of
2116# Neutron in your deployment. It also will be ignored if the
2117# configuration option
2118# for `network_manager` is not set to the default of
2119# 'nova.network.manager.VlanManager'.
2120#
2121# Possible values:
2122#
2123# * Any integer between 1 and 4094. Values outside of that range will
2124# raise a
2125# ValueError exception.
2126#
2127# Related options:
2128#
2129# * ``network_manager``
2130# * ``use_neutron``
2131# (integer value)
2132# Minimum value: 1
2133# Maximum value: 4094
2134# This option is deprecated for removal since 15.0.0.
2135# Its value may be silently ignored in the future.
2136# Reason:
2137# nova-network is deprecated, as are any related configuration
2138# options.
2139#vlan_start = 100
2140
2141# DEPRECATED:
2142# This option is the name of the virtual interface of the VM on which
2143# the VLAN
2144# bridge will be built. While it was originally designed to be used
2145# only by
2146# nova-network, it is also used by libvirt and xenapi for the bridge
2147# interface
2148# name.
2149#
2150# Please note that this setting will be ignored in nova-network if the
2151# configuration option for `network_manager` is not set to the default
2152# of
2153# 'nova.network.manager.VlanManager'.
2154#
2155# Possible values:
2156#
2157# * Any valid virtual interface name, such as 'eth0'
2158# (string value)
2159# This option is deprecated for removal since 15.0.0.
2160# Its value may be silently ignored in the future.
2161# Reason:
2162# nova-network is deprecated, as are any related configuration
2163# options. While
2164# this option has an effect when using neutron, it incorrectly
2165# overrides the value
2166# provided by neutron and should therefore not be used.
2167#vlan_interface = <None>
2168
2169# DEPRECATED:
2170# This option represents the number of networks to create if not
2171# explicitly
2172# specified when the network is created. The only time this is used is
2173# if a CIDR
2174# is specified, but an explicit network_size is not. In that case, the
2175# subnets
2176# are created by dividing the IP address space of the CIDR by
2177# num_networks. The
2178# resulting subnet sizes cannot be larger than the configuration
2179# option
2180# `network_size`; in that event, they are reduced to `network_size`,
2181# and a
2182# warning is logged.
2183#
2184# Please note that this option is only used when using nova-network
2185# instead of
2186# Neutron in your deployment.
2187#
2188# Possible values:
2189#
2190# * Any positive integer is technically valid, although there are
2191# practical
2192# limits based upon available IP address space and virtual
2193# interfaces.
2194#
2195# Related options:
2196#
2197# * ``use_neutron``
2198# * ``network_size``
2199# (integer value)
2200# Minimum value: 1
2201# This option is deprecated for removal since 15.0.0.
2202# Its value may be silently ignored in the future.
2203# Reason:
2204# nova-network is deprecated, as are any related configuration
2205# options.
2206#num_networks = 1
2207
2208# DEPRECATED:
2209# This option is no longer used since the /os-cloudpipe API was
2210# removed in the
2211# 16.0.0 Pike release. This is the public IP address for the cloudpipe
2212# VPN
2213# servers. It defaults to the IP address of the host.
2214#
2215# Please note that this option is only used when using nova-network
2216# instead of
2217# Neutron in your deployment. It also will be ignored if the
2218# configuration option
2219# for `network_manager` is not set to the default of
2220# 'nova.network.manager.VlanManager'.
2221#
2222# Possible values:
2223#
2224# * Any valid IP address. The default is ``$my_ip``, the IP address of
2225# the host.
2226#
2227# Related options:
2228#
2229# * ``network_manager``
2230# * ``use_neutron``
2231# * ``vpn_start``
2232# (string value)
2233# This option is deprecated for removal since 15.0.0.
2234# Its value may be silently ignored in the future.
2235# Reason:
2236# nova-network is deprecated, as are any related configuration
2237# options.
2238#vpn_ip = $my_ip
2239
2240# DEPRECATED:
2241# This is the port number to use as the first VPN port for private
2242# networks.
2243#
2244# Please note that this option is only used when using nova-network
2245# instead of
2246# Neutron in your deployment. It also will be ignored if the
2247# configuration option
2248# for `network_manager` is not set to the default of
2249# 'nova.network.manager.VlanManager', or if you specify a value for
2250# the 'vpn_start'
2251# parameter when creating a network.
2252#
2253# Possible values:
2254#
2255# * Any integer representing a valid port number. The default is 1000.
2256#
2257# Related options:
2258#
2259# * ``use_neutron``
2260# * ``vpn_ip``
2261# * ``network_manager``
2262# (port value)
2263# Minimum value: 0
2264# Maximum value: 65535
2265# This option is deprecated for removal since 15.0.0.
2266# Its value may be silently ignored in the future.
2267# Reason:
2268# nova-network is deprecated, as are any related configuration
2269# options.
2270#vpn_start = 1000
2271
2272# DEPRECATED:
2273# This option determines the number of addresses in each private
2274# subnet.
2275#
2276# Please note that this option is only used when using nova-network
2277# instead of
2278# Neutron in your deployment.
2279#
2280# Possible values:
2281#
2282# * Any positive integer that is less than or equal to the available
2283# network
2284# size. Note that if you are creating multiple networks, they must
2285# all fit in
2286# the available IP address space. The default is 256.
2287#
2288# Related options:
2289#
2290# * ``use_neutron``
2291# * ``num_networks``
2292# (integer value)
2293# Minimum value: 1
2294# This option is deprecated for removal since 15.0.0.
2295# Its value may be silently ignored in the future.
2296# Reason:
2297# nova-network is deprecated, as are any related configuration
2298# options.
2299#network_size = 256
2300
2301# DEPRECATED:
2302# This option determines the fixed IPv6 address block when creating a
2303# network.
2304#
2305# Please note that this option is only used when using nova-network
2306# instead of
2307# Neutron in your deployment.
2308#
2309# Possible values:
2310#
2311# * Any valid IPv6 CIDR
2312#
2313# Related options:
2314#
2315# * ``use_neutron``
2316# (string value)
2317# This option is deprecated for removal since 15.0.0.
2318# Its value may be silently ignored in the future.
2319# Reason:
2320# nova-network is deprecated, as are any related configuration
2321# options.
2322#fixed_range_v6 = fd00::/48
2323
2324# DEPRECATED:
2325# This is the default IPv4 gateway. It is used only in the testing
2326# suite.
2327#
2328# Please note that this option is only used when using nova-network
2329# instead of
2330# Neutron in your deployment.
2331#
2332# Possible values:
2333#
2334# * Any valid IP address.
2335#
2336# Related options:
2337#
2338# * ``use_neutron``
2339# * ``gateway_v6``
2340# (string value)
2341# This option is deprecated for removal since 15.0.0.
2342# Its value may be silently ignored in the future.
2343# Reason:
2344# nova-network is deprecated, as are any related configuration
2345# options.
2346#gateway = <None>
2347
2348# DEPRECATED:
2349# This is the default IPv6 gateway. It is used only in the testing
2350# suite.
2351#
2352# Please note that this option is only used when using nova-network
2353# instead of
2354# Neutron in your deployment.
2355#
2356# Possible values:
2357#
2358# * Any valid IP address.
2359#
2360# Related options:
2361#
2362# * ``use_neutron``
2363# * ``gateway``
2364# (string value)
2365# This option is deprecated for removal since 15.0.0.
2366# Its value may be silently ignored in the future.
2367# Reason:
2368# nova-network is deprecated, as are any related configuration
2369# options.
2370#gateway_v6 = <None>
2371
2372# DEPRECATED:
2373# This option represents the number of IP addresses to reserve at the
2374# top of the
2375# address range for VPN clients. It also will be ignored if the
2376# configuration
2377# option for `network_manager` is not set to the default of
2378# 'nova.network.manager.VlanManager'.
2379#
2380# Possible values:
2381#
2382# * Any integer, 0 or greater.
2383#
2384# Related options:
2385#
2386# * ``use_neutron``
2387# * ``network_manager``
2388# (integer value)
2389# Minimum value: 0
2390# This option is deprecated for removal since 15.0.0.
2391# Its value may be silently ignored in the future.
2392# Reason:
2393# nova-network is deprecated, as are any related configuration
2394# options.
2395#cnt_vpn_clients = 0
2396
2397# DEPRECATED:
2398# This is the number of seconds to wait before disassociating a
2399# deallocated fixed
2400# IP address. This is only used with the nova-network service, and has
2401# no effect
2402# when using neutron for networking.
2403#
2404# Possible values:
2405#
2406# * Any integer, zero or greater.
2407#
2408# Related options:
2409#
2410# * ``use_neutron``
2411# (integer value)
2412# Minimum value: 0
2413# This option is deprecated for removal since 15.0.0.
2414# Its value may be silently ignored in the future.
2415# Reason:
2416# nova-network is deprecated, as are any related configuration
2417# options.
2418#fixed_ip_disassociate_timeout = 600
2419
2420# DEPRECATED:
2421# This option determines how many times nova-network will attempt to
2422# create a
2423# unique MAC address before giving up and raising a
2424# `VirtualInterfaceMacAddressException` error.
2425#
2426# Possible values:
2427#
2428# * Any positive integer. The default is 5.
2429#
2430# Related options:
2431#
2432# * ``use_neutron``
2433# (integer value)
2434# Minimum value: 1
2435# This option is deprecated for removal since 15.0.0.
2436# Its value may be silently ignored in the future.
2437# Reason:
2438# nova-network is deprecated, as are any related configuration
2439# options.
2440#create_unique_mac_address_attempts = 5
2441
2442# DEPRECATED:
2443# Determines whether unused gateway devices, both VLAN and bridge, are
2444# deleted if
2445# the network is in nova-network VLAN mode and is multi-hosted.
2446#
2447# Related options:
2448#
2449# * ``use_neutron``
2450# * ``vpn_ip``
2451# * ``fake_network``
2452# (boolean value)
2453# This option is deprecated for removal since 15.0.0.
2454# Its value may be silently ignored in the future.
2455# Reason:
2456# nova-network is deprecated, as are any related configuration
2457# options.
2458#teardown_unused_network_gateway = false
2459
2460# DEPRECATED:
2461# When this option is True, a call is made to release the DHCP for the
2462# instance
2463# when that instance is terminated.
2464#
2465# Related options:
2466#
2467# * ``use_neutron``
2468# (boolean value)
2469# This option is deprecated for removal since 15.0.0.
2470# Its value may be silently ignored in the future.
2471# Reason:
2472# nova-network is deprecated, as are any related configuration
2473# options.
2474force_dhcp_release = {{ compute.get('force_dhcp_release', 'true') }}
2475
2476# DEPRECATED:
2477# When this option is True, whenever a DNS entry must be updated, a
2478# fanout cast
2479# message is sent to all network hosts to update their DNS entries in
2480# multi-host
2481# mode.
2482#
2483# Related options:
2484#
2485# * ``use_neutron``
2486# (boolean value)
2487# This option is deprecated for removal since 15.0.0.
2488# Its value may be silently ignored in the future.
2489# Reason:
2490# nova-network is deprecated, as are any related configuration
2491# options.
2492#update_dns_entries = false
2493
2494# DEPRECATED:
2495# This option determines the time, in seconds, to wait between
2496# refreshing DNS
2497# entries for the network.
2498#
2499# Possible values:
2500#
2501# * A positive integer
2502# * -1 to disable updates
2503#
2504# Related options:
2505#
2506# * ``use_neutron``
2507# (integer value)
2508# Minimum value: -1
2509# This option is deprecated for removal since 15.0.0.
2510# Its value may be silently ignored in the future.
2511# Reason:
2512# nova-network is deprecated, as are any related configuration
2513# options.
2514#dns_update_periodic_interval = -1
2515
2516# DEPRECATED:
2517# This option allows you to specify the domain for the DHCP server.
2518#
2519# Possible values:
2520#
2521# * Any string that is a valid domain name.
2522#
2523# Related options:
2524#
2525# * ``use_neutron``
2526# (string value)
2527# This option is deprecated for removal since 15.0.0.
2528# Its value may be silently ignored in the future.
2529# Reason:
2530# nova-network is deprecated, as are any related configuration
2531# options.
2532#dhcp_domain = novalocal
2533dhcp_domain={{ compute.get('dhcp_domain', 'novalocal') }}
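# The value above is rendered from pillar data. A minimal pillar sketch
# (an assumption based on the nova:compute context imported by this
# template; the domain value is a placeholder):
#
#   nova:
#     compute:
#       dhcp_domain: novalocal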
2534
2535# DEPRECATED:
2536# This option allows you to specify the L3 management library to be
2537# used.
2538#
2539# Possible values:
2540#
2541# * Any dot-separated string that represents the import path to an L3
2542# networking
2543# library.
2544#
2545# Related options:
2546#
2547# * ``use_neutron``
2548# (string value)
2549# This option is deprecated for removal since 15.0.0.
2550# Its value may be silently ignored in the future.
2551# Reason:
2552# nova-network is deprecated, as are any related configuration
2553# options.
2554#l3_lib = nova.network.l3.LinuxNetL3
2555
2556# DEPRECATED:
2557# THIS VALUE SHOULD BE SET WHEN CREATING THE NETWORK.
2558#
2559# If True in multi_host mode, all compute hosts share the same dhcp
2560# address. The
2561# same IP address used for DHCP will be added on each nova-network
2562# node which is
2563# only visible to the VMs on the same host.
2564#
2565# The use of this configuration has been deprecated and may be removed
2566# in any
2567# release after Mitaka. It is recommended that instead of relying on
2568# this option,
2569# an explicit value should be passed to 'create_networks()' as a
2570# keyword argument
2571# with the name 'share_address'.
2572# (boolean value)
2573# This option is deprecated for removal since 2014.2.
2574# Its value may be silently ignored in the future.
2575#share_dhcp_address = false
2576
2577# DEPRECATED:
2578# URL for LDAP server which will store DNS entries
2579#
2580# Possible values:
2581#
2582# * A valid LDAP URL representing the server
2583# (uri value)
2584# This option is deprecated for removal since 16.0.0.
2585# Its value may be silently ignored in the future.
2586# Reason:
2587# nova-network is deprecated, as are any related configuration
2588# options.
2589#ldap_dns_url = ldap://ldap.example.com:389
2590
2591# DEPRECATED: Bind user for LDAP server (string value)
2592# This option is deprecated for removal since 16.0.0.
2593# Its value may be silently ignored in the future.
2594# Reason:
2595# nova-network is deprecated, as are any related configuration
2596# options.
2597#ldap_dns_user = uid=admin,ou=people,dc=example,dc=org
2598
2599# DEPRECATED: Bind user's password for LDAP server (string value)
2600# This option is deprecated for removal since 16.0.0.
2601# Its value may be silently ignored in the future.
2602# Reason:
2603# nova-network is deprecated, as are any related configuration
2604# options.
2605#ldap_dns_password = password
2606
2607# DEPRECATED:
2608# Hostmaster for LDAP DNS driver Start of Authority
2609#
2610# Possible values:
2611#
2612# * Any valid string representing LDAP DNS hostmaster.
2613# (string value)
2614# This option is deprecated for removal since 16.0.0.
2615# Its value may be silently ignored in the future.
2616# Reason:
2617# nova-network is deprecated, as are any related configuration
2618# options.
2619#ldap_dns_soa_hostmaster = hostmaster@example.org
2620
2621# DEPRECATED:
2622# DNS Servers for LDAP DNS driver
2623#
2624# Possible values:
2625#
2626# * A valid URL representing a DNS server
2627# (multi valued)
2628# This option is deprecated for removal since 16.0.0.
2629# Its value may be silently ignored in the future.
2630# Reason:
2631# nova-network is deprecated, as are any related configuration
2632# options.
2633#ldap_dns_servers = dns.example.org
2634
2635# DEPRECATED:
2636# Base distinguished name for the LDAP search query
2637#
2638# This option helps to decide where to look up the host in LDAP.
2639# (string value)
2640# This option is deprecated for removal since 16.0.0.
2641# Its value may be silently ignored in the future.
2642# Reason:
2643# nova-network is deprecated, as are any related configuration
2644# options.
2645#ldap_dns_base_dn = ou=hosts,dc=example,dc=org
2646
2647# DEPRECATED:
2648# Refresh interval (in seconds) for LDAP DNS driver Start of Authority
2649#
2650# Time interval that a secondary/slave DNS server waits before requesting
2651# the
2652# primary DNS server's current SOA record. If the records are
2653# different,
2654# the secondary DNS server will request a zone transfer from the primary.
2655#
2656# NOTE: Lower values would cause more traffic.
2657# (integer value)
2658# This option is deprecated for removal since 16.0.0.
2659# Its value may be silently ignored in the future.
2660# Reason:
2661# nova-network is deprecated, as are any related configuration
2662# options.
2663#ldap_dns_soa_refresh = 1800
2664
2665# DEPRECATED:
2666# Retry interval (in seconds) for LDAP DNS driver Start of Authority
2667#
2668# Time interval that a secondary/slave DNS server should wait if an
2669# attempt to transfer the zone failed during the previous refresh
2670# interval.
2671# (integer value)
2672# This option is deprecated for removal since 16.0.0.
2673# Its value may be silently ignored in the future.
2674# Reason:
2675# nova-network is deprecated, as are any related configuration
2676# options.
2677#ldap_dns_soa_retry = 3600
2678
2679# DEPRECATED:
2680# Expiry interval (in seconds) for LDAP DNS driver Start of Authority
2681#
2682# Time interval for which a secondary/slave DNS server holds the
2683# information before it is no longer considered authoritative.
2684# (integer value)
2685# This option is deprecated for removal since 16.0.0.
2686# Its value may be silently ignored in the future.
2687# Reason:
2688# nova-network is deprecated, as are any related configuration
2689# options.
2690#ldap_dns_soa_expiry = 86400
2691
2692# DEPRECATED:
2693# Minimum interval (in seconds) for LDAP DNS driver Start of Authority
2694#
2695# This is the minimum time-to-live that applies to all resource records
2696# in the zone file. The value tells other servers how long they
2697# should keep the data in their caches.
2698# (integer value)
2699# This option is deprecated for removal since 16.0.0.
2700# Its value may be silently ignored in the future.
2701# Reason:
2702# nova-network is deprecated, as are any related configuration
2703# options.
2704#ldap_dns_soa_minimum = 7200
2705
2706# DEPRECATED:
2707# Default value for multi_host in networks.
2708#
2709# nova-network service can operate in a multi-host or single-host
2710# mode.
2711# In multi-host mode each compute node runs a copy of nova-network and
2712# the
2713# instances on that compute node use the compute node as a gateway to
2714# the
2715# Internet. Whereas in single-host mode, a central server runs the
2716# nova-network
2717# service. All compute nodes forward traffic from the instances to the
2718# cloud controller which then forwards traffic to the Internet.
2719#
2720# If this option is set to true, some RPC network calls will be sent
2721# directly
2722# to the host.
2723#
2724# Note that this option is only used when using nova-network instead
2725# of
2726# Neutron in your deployment.
2727#
2728# Related options:
2729#
2730# * ``use_neutron``
2731# (boolean value)
2732# This option is deprecated for removal since 15.0.0.
2733# Its value may be silently ignored in the future.
2734# Reason:
2735# nova-network is deprecated, as are any related configuration
2736# options.
2737#multi_host = false
2738
2739# DEPRECATED:
2740# Driver to use for network creation.
2741#
2742# Network driver initializes (creates bridges and so on) only when the
2743# first VM lands on a host node. All network managers configure the
2744# network using network drivers. The driver is not tied to any
2745# particular
2746# network manager.
2747#
2748# The default Linux driver implements vlans, bridges, and iptables
2749# rules
2750# using linux utilities.
2751#
2752# Note that this option is only used when using nova-network instead
2753# of Neutron in your deployment.
2754#
2755# Related options:
2756#
2757# * ``use_neutron``
2758# (string value)
2759# This option is deprecated for removal since 15.0.0.
2760# Its value may be silently ignored in the future.
2761# Reason:
2762# nova-network is deprecated, as are any related configuration
2763# options.
2764#network_driver = nova.network.linux_net
2765
2766# DEPRECATED:
2767# Firewall driver to use with ``nova-network`` service.
2768#
2769# This option only applies when using the ``nova-network`` service.
2770# When using
2771# another networking service, such as Neutron, this should be set to
2772# the
2773# ``nova.virt.firewall.NoopFirewallDriver``.
2774#
2775# Possible values:
2776#
2777# * ``nova.virt.firewall.IptablesFirewallDriver``
2778# * ``nova.virt.firewall.NoopFirewallDriver``
2779# * ``nova.virt.libvirt.firewall.IptablesFirewallDriver``
2780# * [...]
2781#
2782# Related options:
2783#
2784# * ``use_neutron``: This must be set to ``False`` to enable ``nova-
2785# network``
2786# networking
2787# (string value)
2788# This option is deprecated for removal since 16.0.0.
2789# Its value may be silently ignored in the future.
2790# Reason:
2791# nova-network is deprecated, as are any related configuration
2792# options.
2793firewall_driver = nova.virt.firewall.NoopFirewallDriver
2794
2795# DEPRECATED:
2796# Determine whether to allow network traffic from same network.
2797#
2798# When set to true, hosts on the same subnet are not filtered and are
2799# allowed
2800# to pass all types of traffic between them. On a flat network, this
2801# allows
2802# all instances from all projects unfiltered communication. With VLAN
2803# networking, this allows access between instances within the same
2804# project.
2805#
2806# This option only applies when using the ``nova-network`` service.
2807# When using
2808# another networking service, such as Neutron, security groups or
2809# other
2810# approaches should be used.
2811#
2812# Possible values:
2813#
2814# * True: Network traffic should be allowed to pass between all instances
2815# on the
2816# same network, regardless of their tenant and security policies
2817# * False: Network traffic should not be allowed to pass between
2818# instances unless
2819# it is unblocked in a security group
2820#
2821# Related options:
2822#
2823# * ``use_neutron``: This must be set to ``False`` to enable ``nova-
2824# network``
2825# networking
2826# * ``firewall_driver``: This must be set to
2827# ``nova.virt.libvirt.firewall.IptablesFirewallDriver`` to ensure
2828# the
2829# libvirt firewall driver is enabled.
2830# (boolean value)
2831# This option is deprecated for removal since 16.0.0.
2832# Its value may be silently ignored in the future.
2833# Reason:
2834# nova-network is deprecated, as are any related configuration
2835# options.
2836#allow_same_net_traffic = true
2837
2838# DEPRECATED:
2839# Default pool for floating IPs.
2840#
2841# This option specifies the default floating IP pool for allocating
2842# floating IPs.
2843#
2844# While allocating a floating IP, users can optionally pass in the
2845# name of the
2846# pool they want to allocate from, otherwise it will be pulled from
2847# the
2848# default pool.
2849#
2850# If this option is not set, then 'nova' is used as default floating
2851# pool.
2852#
2853# Possible values:
2854#
2855# * Any string representing a floating IP pool name
2856# (string value)
2857# This option is deprecated for removal since 16.0.0.
2858# Its value may be silently ignored in the future.
2859# Reason:
2860# This option was used for two purposes: to set the floating IP pool
2861# name for
2862# nova-network and to do the same for neutron. nova-network is
2863# deprecated, as are
2864# any related configuration options. Users of neutron, meanwhile,
2865# should use the
2866# 'default_floating_pool' option in the '[neutron]' group.
2867#default_floating_pool = nova
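# As noted above, Neutron-based deployments (like the one this template
# targets) should use the 'default_floating_pool' option in the '[neutron]'
# group instead; illustrative (commented-out) sketch with a placeholder
# pool name:
#
#   [neutron]
#   default_floating_pool = public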
2868
2869# DEPRECATED:
2870# Autoassigning floating IP to VM
2871#
2872# When set to True, floating IP is auto allocated and associated
2873# to the VM upon creation.
2874#
2875# Related options:
2876#
2877# * use_neutron: this option only works with nova-network.
2878# (boolean value)
2879# This option is deprecated for removal since 15.0.0.
2880# Its value may be silently ignored in the future.
2881# Reason:
2882# nova-network is deprecated, as are any related configuration
2883# options.
2884#auto_assign_floating_ip = false
2885
2886# DEPRECATED:
2887# Full class name for the DNS Manager for floating IPs.
2888#
2889# This option specifies the class of the driver that provides
2890# functionality
2891# to manage DNS entries associated with floating IPs.
2892#
2893# When a user adds a DNS entry for a specified domain to a floating
2894# IP,
2895# nova will add a DNS entry using the specified floating DNS driver.
2896# When a floating IP is deallocated, its DNS entry will automatically
2897# be deleted.
2898#
2899# Possible values:
2900#
2901# * Full Python path to the class to be used
2902#
2903# Related options:
2904#
2905# * use_neutron: this option only works with nova-network.
2906# (string value)
2907# This option is deprecated for removal since 15.0.0.
2908# Its value may be silently ignored in the future.
2909# Reason:
2910# nova-network is deprecated, as are any related configuration
2911# options.
2912#floating_ip_dns_manager = nova.network.noop_dns_driver.NoopDNSDriver
2913
2914# DEPRECATED:
2915# Full class name for the DNS Manager for instance IPs.
2916#
2917# This option specifies the class of the driver that provides
2918# functionality
2919# to manage DNS entries for instances.
2920#
2921# On instance creation, nova will add DNS entries for the instance
2922# name and
2923# id, using the specified instance DNS driver and domain. On instance
2924# deletion,
2925# nova will remove the DNS entries.
2926#
2927# Possible values:
2928#
2929# * Full Python path to the class to be used
2930#
2931# Related options:
2932#
2933# * use_neutron: this option only works with nova-network.
2934# (string value)
2935# This option is deprecated for removal since 15.0.0.
2936# Its value may be silently ignored in the future.
2937# Reason:
2938# nova-network is deprecated, as are any related configuration
2939# options.
2940#instance_dns_manager = nova.network.noop_dns_driver.NoopDNSDriver
2941
2942# DEPRECATED:
2943# If specified, Nova checks if the availability_zone of every instance
2944# matches
2945# what the database says the availability_zone should be for the
2946# specified
2947# dns_domain.
2948#
2949# Related options:
2950#
2951# * use_neutron: this option only works with nova-network.
2952# (string value)
2953# This option is deprecated for removal since 15.0.0.
2954# Its value may be silently ignored in the future.
2955# Reason:
2956# nova-network is deprecated, as are any related configuration
2957# options.
2958#instance_dns_domain =
2959
2960# DEPRECATED:
2961# Assign IPv6 and IPv4 addresses when creating instances.
2962#
2963# Related options:
2964#
2965# * use_neutron: this only works with nova-network.
2966# (boolean value)
2967# This option is deprecated for removal since 16.0.0.
2968# Its value may be silently ignored in the future.
2969# Reason:
2970# nova-network is deprecated, as are any related configuration
2971# options.
2972#use_ipv6 = false
2973
2974# DEPRECATED:
2975# Abstracts out IPv6 address generation to pluggable backends.
2976#
2977# nova-network can be put into dual-stack mode, so that it uses
2978# both IPv4 and IPv6 addresses. In dual-stack mode, by default,
2979# instances
2980# acquire IPv6 global unicast addresses with the help of the stateless
2981# address
2982# auto-configuration mechanism.
2983#
2984# Related options:
2985#
2986# * use_neutron: this option only works with nova-network.
2987# * use_ipv6: this option only works if ipv6 is enabled for nova-
2988# network.
2989# (string value)
2990# Possible values:
2991# rfc2462 - <No description provided>
2992# account_identifier - <No description provided>
2993# This option is deprecated for removal since 16.0.0.
2994# Its value may be silently ignored in the future.
2995# Reason:
2996# nova-network is deprecated, as are any related configuration
2997# options.
2998#ipv6_backend = rfc2462
2999
3000# DEPRECATED:
3001# This option is used to enable or disable quota checking for tenant
3002# networks.
3003#
3004# Related options:
3005#
3006# * quota_networks
3007# (boolean value)
3008# This option is deprecated for removal since 14.0.0.
3009# Its value may be silently ignored in the future.
3010# Reason:
3011# CRUD operations on tenant networks are only available when using
3012# nova-network
3013# and nova-network is itself deprecated.
3014#enable_network_quota = false
3015
3016# DEPRECATED:
3017# This option controls the number of private networks that can be
3018# created per
3019# project (or per tenant).
3020#
3021# Related options:
3022#
3023# * enable_network_quota
3024# (integer value)
3025# Minimum value: 0
3026# This option is deprecated for removal since 14.0.0.
3027# Its value may be silently ignored in the future.
3028# Reason:
3029# CRUD operations on tenant networks are only available when using
3030# nova-network
3031# and nova-network is itself deprecated.
3032#quota_networks = 3
3033
3034#
3035# Filename that will be used for storing websocket frames received
3036# and sent by a proxy service (like VNC, spice, serial) running on
3037# this host.
3038# If this is not set, no recording will be done.
3039# (string value)
3040#record = <None>
3041
3042# Run as a background process. (boolean value)
3043#daemon = false
3044
3045# Disallow non-encrypted connections. (boolean value)
3046#ssl_only = false
3047
3048# Set to True if source host is addressed with IPv6. (boolean value)
3049#source_is_ipv6 = false
3050
3051# Path to SSL certificate file. (string value)
3052#cert = self.pem
3053
3054# SSL key file (if separate from cert). (string value)
3055#key = <None>
3056
3057#
3058# Path to directory with content which will be served by a web server.
3059# (string value)
3060#web = /usr/share/spice-html5
3061
3062#
3063# The directory where the Nova python modules are installed.
3064#
3065# This directory is used to store template files for networking and
3066# remote
3067# console access. It is also the default path for other config options
3068# which
3069# need to persist Nova internal data. It is very unlikely that you
3070# need to
3071# change this option from its default value.
3072#
3073# Possible values:
3074#
3075# * The full path to a directory.
3076#
3077# Related options:
3078#
3079# * ``state_path``
3080# (string value)
3081#pybasedir = /usr/lib/python2.7/dist-packages
3082
3083#
3084# The directory where the Nova binaries are installed.
3085#
3086# This option is only relevant if the networking capabilities from
3087# Nova are
3088# used (see services below). Nova's networking capabilities are
3089# targeted to
3090# be fully replaced by Neutron in the future. It is very unlikely that
3091# you need
3092# to change this option from its default value.
3093#
3094# Possible values:
3095#
3096# * The full path to a directory.
3097# (string value)
3098#bindir = /usr/local/bin
3099
3100#
3101# The top-level directory for maintaining Nova's state.
3102#
3103# This directory is used to store Nova's internal state. It is used by
3104# a
3105# variety of other config options which derive from this. In some
3106# scenarios
3107# (for example migrations) it makes sense to use a storage location
3108# which is
3109# shared between multiple compute hosts (for example via NFS). Unless
3110# the
3111# option ``instances_path`` gets overwritten, this directory can grow
3112# very
3113# large.
3114#
3115# Possible values:
3116#
3117# * The full path to a directory. Defaults to value provided in
3118# ``pybasedir``.
3119# (string value)
3120state_path = /var/lib/nova
3121
3122#
3123# Number of seconds indicating how frequently the state of services on
3124# a
3125# given hypervisor is reported. Nova needs to know this to determine
3126# the
3127# overall health of the deployment.
3128#
3129# Related Options:
3130#
3131# * service_down_time
3132# report_interval should be less than service_down_time. If
3133# service_down_time
3134# is less than report_interval, services will routinely be
3135# considered down,
3136# because they report in too rarely.
3137# (integer value)
3138#report_interval = 10
3139report_interval = {{ compute.get('report_interval', '60') }}
3140
3141#
3142# Maximum time in seconds since last check-in for up service
3143#
3144# Each compute node periodically updates their database status based
3145# on the
3146# specified report interval. If the compute node hasn't updated the
3147# status
3148# for more than service_down_time, then the compute node is considered
3149# down.
3150#
3151# Related Options:
3152#
3153# * report_interval (service_down_time should not be less than
3154# report_interval)
3155# (integer value)
3156service_down_time = 90
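# A quick worked check of the constraint described above: with
# report_interval = 60 (the template default set earlier) and
# service_down_time = 90, a service may miss one report and still be
# considered up, since 60 < 90. Raising report_interval above 90 without
# also raising service_down_time would cause services to be routinely
# flagged as down.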
3157
3158#
3159# Enable periodic tasks.
3160#
3161# If set to true, this option allows services to periodically run
3162# tasks
3163# on the manager.
3164#
3165# In case of running multiple schedulers or conductors you may want to
3166# run
3167# periodic tasks on only one host - in this case disable this option
3168# for all
3169# hosts but one.
3170# (boolean value)
3171#periodic_enable = true
3172
3173#
3174# Number of seconds to randomly delay when starting the periodic task
3175# scheduler to reduce stampeding.
3176#
3177# When compute workers are restarted in unison across a cluster,
3178# they all end up running the periodic tasks at the same time
3179# causing problems for the external services. To mitigate this
3180# behavior, periodic_fuzzy_delay option allows you to introduce a
3181# random initial delay when starting the periodic task scheduler.
3182#
3183# Possible Values:
3184#
3185# * Any positive integer (in seconds)
3186# * 0 : disable the random delay
3187# (integer value)
3188# Minimum value: 0
3189#periodic_fuzzy_delay = 60
3190
3191# List of APIs to be enabled by default. (list value)
3192enabled_apis = osapi_compute,metadata
3193
3194#
3195# List of APIs with enabled SSL.
3196#
3197# Nova provides SSL support for the API servers. enabled_ssl_apis
3198# option
3199# allows configuring the SSL support.
3200# (list value)
3201#enabled_ssl_apis =
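# Illustrative (commented-out) example, assuming SSL should be enabled for
# the same APIs listed in enabled_apis above:
#enabled_ssl_apis = osapi_compute,metadata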
3202
3203#
3204# IP address on which the OpenStack API will listen.
3205#
3206# The OpenStack API service listens on this IP address for incoming
3207# requests.
3208# (string value)
3209#osapi_compute_listen = 0.0.0.0
3210
3211#
3212# Port on which the OpenStack API will listen.
3213#
3214# The OpenStack API service listens on this port number for incoming
3215# requests.
3216# (port value)
3217# Minimum value: 0
3218# Maximum value: 65535
3219#osapi_compute_listen_port = 8774
3220
3221#
3222# Number of workers for OpenStack API service. The default will be the
3223# number
3224# of CPUs available.
3225#
3226# OpenStack API services can be configured to run as multi-process
3227# (workers).
3228# This overcomes the problem of reduction in throughput when API
3229# request
3230# concurrency increases. OpenStack API service will run in the
3231# specified
3232# number of processes.
3233#
3234# Possible Values:
3235#
3236# * Any positive integer
3237# * None (default value)
3238# (integer value)
3239# Minimum value: 1
3240#osapi_compute_workers = <None>
3241
3242#
3243# IP address on which the metadata API will listen.
3244#
3245# The metadata API service listens on this IP address for incoming
3246# requests.
3247# (string value)
3248#metadata_listen = 0.0.0.0
3249
3250#
3251# Port on which the metadata API will listen.
3252#
3253# The metadata API service listens on this port number for incoming
3254# requests.
3255# (port value)
3256# Minimum value: 0
3257# Maximum value: 65535
3258#metadata_listen_port = 8775
3259
3260#
3261# Number of workers for metadata service. If not specified the number
3262# of
3263# available CPUs will be used.
3264#
3265# The metadata service can be configured to run as multi-process
3266# (workers).
3267# This overcomes the problem of reduction in throughput when API
3268# request
3269# concurrency increases. The metadata service will run in the
3270# specified
3271# number of processes.
3272#
3273# Possible Values:
3274#
3275# * Any positive integer
3276# * None (default value)
3277# (integer value)
3278# Minimum value: 1
3279#metadata_workers = <None>
3280
3281# Full class name for the Manager for network (string value)
3282# Possible values:
3283# nova.network.manager.FlatManager - <No description provided>
3284# nova.network.manager.FlatDHCPManager - <No description provided>
3285# nova.network.manager.VlanManager - <No description provided>
3286#network_manager = nova.network.manager.VlanManager
3287
3288#
3289# This option specifies the driver to be used for the servicegroup
3290# service.
3291#
3292# ServiceGroup API in nova enables checking status of a compute node.
3293# When a
3294# compute worker running the nova-compute daemon starts, it calls the
3295# join API
3296# to join the compute group. Services like nova scheduler can query
3297# the
3298# ServiceGroup API to check if a node is alive. Internally, the
3299# ServiceGroup
3300# client driver automatically updates the compute worker status. There
3301# are
3302# multiple backend implementations for this service: Database
3303# ServiceGroup driver
3304# and Memcache ServiceGroup driver.
3305#
3306# Possible Values:
3307#
3308# * db : Database ServiceGroup driver
3309# * mc : Memcache ServiceGroup driver
3310#
3311# Related Options:
3312#
3313# * service_down_time (maximum time since last check-in for up
3314# service)
3315# (string value)
3316# Possible values:
3317# db - <No description provided>
3318# mc - <No description provided>
3319#servicegroup_driver = db
3320
3321#
3322# From oslo.service.periodic_task
3323#
3324
3325# Some periodic tasks can be run in a separate process. Should we run
3326# them here? (boolean value)
3327#run_external_periodic_tasks = true
3328
3329#
3330# From oslo.service.service
3331#
3332
3333# Enable eventlet backdoor. Acceptable values are 0, <port>, and
3334# <start>:<end>, where 0 results in listening on a random tcp port
3335# number; <port> results in listening on the specified port number
3336# (and not enabling backdoor if that port is in use); and
3337# <start>:<end> results in listening on the smallest unused port
3338# number within the specified range of port numbers. The chosen port
3339# is displayed in the service's log file. (string value)
3340#backdoor_port = <None>
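# Illustrative (commented-out) examples of the three accepted formats
# described above (random port, fixed port, port range); the numbers are
# placeholders:
#backdoor_port = 0
#backdoor_port = 4444
#backdoor_port = 4000:4100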
3341
3342# Enable eventlet backdoor, using the provided path as a unix socket
3343# that can receive connections. This option is mutually exclusive with
3344# 'backdoor_port' in that only one should be provided. If both are
3345# provided then the existence of this option overrides the usage of
3346# that option. (string value)
3347#backdoor_socket = <None>
3348
3349# Enables or disables logging values of all registered options when
3350# starting a service (at DEBUG level). (boolean value)
3351#log_options = true
3352
3353# Specify a timeout after which a gracefully shutdown server will
3354# exit. Zero value means endless wait. (integer value)
3355#graceful_shutdown_timeout = 60
3356
3357{%- if compute.logging is defined %}
3358{%- set _data = compute.logging %}
3359{%- include "oslo_templates/files/queens/oslo/_log.conf" %}
3360{%- endif %}
3361
3362{%- set _data = compute.message_queue %}
3363{%- include "oslo_templates/files/queens/oslo/messaging/_default.conf" %}
3364
3365[api]
3366#
3367# Options under this group are used to define Nova API.
3368
3369#
3370# From nova.conf
3371#
3372
3373#
3374# This determines the strategy to use for authentication: keystone or
3375# noauth2.
3376# 'noauth2' is designed for testing only, as it does no actual
3377# credential
3378# checking. 'noauth2' provides administrative credentials only if
3379# 'admin' is
3380# specified as the username.
3381# (string value)
3382# Possible values:
3383# keystone - <No description provided>
3384# noauth2 - <No description provided>
3385auth_strategy = keystone
3386
3387#
3388# When True, the 'X-Forwarded-For' header is treated as the canonical
3389# remote
3390# address. When False (the default), the 'remote_address' header is
3391# used.
3392#
3393# You should only enable this if you have an HTML sanitizing proxy.
3394# (boolean value)
3395#use_forwarded_for = false
3396
3397#
3398# When gathering the existing metadata for a config drive, the
3399# EC2-style
3400# metadata is returned for all versions that don't appear in this
3401# option.
3402# As of the Liberty release, the available versions are:
3403#
3404# * 1.0
3405# * 2007-01-19
3406# * 2007-03-01
3407# * 2007-08-29
3408# * 2007-10-10
3409# * 2007-12-15
3410# * 2008-02-01
3411# * 2008-09-01
3412# * 2009-04-04
3413#
3414# The option is in the format of a single string, with each version
3415# separated
3416# by a space.
3417#
3418# Possible values:
3419#
3420# * Any string that represents zero or more versions, separated by
3421# spaces.
3422# (string value)
3423#config_drive_skip_versions = 1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01
3424
3425#
3426# A list of vendordata providers.
3427#
3428# vendordata providers are how deployers can provide metadata via
3429# configdrive
3430# and metadata that is specific to their deployment. There are
3431# currently two
3432# supported providers: StaticJSON and DynamicJSON.
3433#
3434# StaticJSON reads a JSON file configured by the flag
3435# vendordata_jsonfile_path
3436# and places the JSON from that file into vendor_data.json and
3437# vendor_data2.json.
3438#
3439# DynamicJSON is configured via the vendordata_dynamic_targets flag,
3440# which is
3441# documented separately. For each of the endpoints specified in that
3442# flag, a
3443# section is added to the vendor_data2.json.
3444#
3445# For more information on the requirements for implementing a
3446# vendordata
3447# dynamic endpoint, please see the vendordata.rst file in the nova
3448# developer
3449# reference.
3450#
3451# Possible values:
3452#
3453# * A list of vendordata providers, with StaticJSON and DynamicJSON
3454# being
3455# current options.
3456#
3457# Related options:
3458#
3459# * vendordata_dynamic_targets
3460# * vendordata_dynamic_ssl_certfile
3461# * vendordata_dynamic_connect_timeout
3462# * vendordata_dynamic_read_timeout
3463# * vendordata_dynamic_failure_fatal
3464# (list value)
3465#vendordata_providers = StaticJSON
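# Illustrative (commented-out) example enabling both providers named above:
#vendordata_providers = StaticJSON,DynamicJSON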
3466
3467#
3468# A list of targets for the dynamic vendordata provider. These targets
3469# are of
3470# the form <name>@<url>.
3471#
3472# The dynamic vendordata provider collects metadata by contacting
3473# external REST
3474# services and querying them for information about the instance. This
3475# behaviour
3476# is documented in the vendordata.rst file in the nova developer
3477# reference.
3478# (list value)
3479#vendordata_dynamic_targets =
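# Illustrative (commented-out) entry in the <name>@<url> form described
# above; the service name and URL are placeholders:
#vendordata_dynamic_targets = testing@http://127.0.0.1:9125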
3480
3481#
3482# Path to an optional certificate file or CA bundle to verify dynamic
3483# vendordata REST services ssl certificates against.
3484#
3485# Possible values:
3486#
3487# * An empty string, or a path to a valid certificate file
3488#
3489# Related options:
3490#
3491# * vendordata_providers
3492# * vendordata_dynamic_targets
3493# * vendordata_dynamic_connect_timeout
3494# * vendordata_dynamic_read_timeout
3495# * vendordata_dynamic_failure_fatal
3496# (string value)
3497#vendordata_dynamic_ssl_certfile =
3498
3499#
3500# Maximum wait time for an external REST service to connect.
3501#
3502# Possible values:
3503#
3504# * Any integer with a value greater than three (the TCP packet
3505# retransmission
3506# timeout). Note that instance start may be blocked during this wait
3507# time,
3508# so this value should be kept small.
3509#
3510# Related options:
3511#
3512# * vendordata_providers
3513# * vendordata_dynamic_targets
3514# * vendordata_dynamic_ssl_certfile
3515# * vendordata_dynamic_read_timeout
3516# * vendordata_dynamic_failure_fatal
3517# (integer value)
3518# Minimum value: 3
3519#vendordata_dynamic_connect_timeout = 5
3520
3521#
3522# Maximum wait time for an external REST service to return data once
3523# connected.
3524#
3525# Possible values:
3526#
3527# * Any integer. Note that instance start is blocked during this wait
3528# time,
3529# so this value should be kept small.
3530#
3531# Related options:
3532#
3533# * vendordata_providers
3534# * vendordata_dynamic_targets
3535# * vendordata_dynamic_ssl_certfile
3536# * vendordata_dynamic_connect_timeout
3537# * vendordata_dynamic_failure_fatal
3538# (integer value)
3539# Minimum value: 0
3540#vendordata_dynamic_read_timeout = 5
3541
3542#
3543# Should failures to fetch dynamic vendordata be fatal to instance
3544# boot?
3545#
3546# Related options:
3547#
3548# * vendordata_providers
3549# * vendordata_dynamic_targets
3550# * vendordata_dynamic_ssl_certfile
3551# * vendordata_dynamic_connect_timeout
3552# * vendordata_dynamic_read_timeout
3553# (boolean value)
3554#vendordata_dynamic_failure_fatal = false
3555
3556#
3557# This option is the time (in seconds) to cache metadata. When set to
3558# 0,
3559# metadata caching is disabled entirely; this is generally not
3560# recommended for
3561# performance reasons. Increasing this setting should improve response
3562# times
3563# of the metadata API when under heavy load. Higher values may
3564# increase memory
3565# usage, and result in longer times for host metadata changes to take
3566# effect.
3567# (integer value)
3568# Minimum value: 0
3569#metadata_cache_expiration = 15
3570
3571#
# Cloud providers may store custom data in a vendor data file that will
3573# then be
3574# available to the instances via the metadata service, and to the
3575# rendering of
3576# config-drive. The default class for this, JsonFileVendorData, loads
3577# this
3578# information from a JSON file, whose path is configured by this
3579# option. If
3580# there is no path set by this option, the class returns an empty
3581# dictionary.
3582#
3583# Possible values:
3584#
3585# * Any string representing the path to the data file, or an empty
3586# string
3587# (default).
3588# (string value)
3589#vendordata_jsonfile_path = <None>
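#
# Illustrative example; the path below is hypothetical. The referenced
# file should contain the JSON document to expose as vendor_data.json:
#vendordata_jsonfile_path = /etc/nova/vendor_data.json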
3590
3591#
3592# As a query can potentially return many thousands of items, you can
3593# limit the
3594# maximum number of items in a single response by setting this option.
3595# (integer value)
3596# Minimum value: 0
3597# Deprecated group/name - [DEFAULT]/osapi_max_limit
3598#max_limit = 1000
3599
3600#
3601# This string is prepended to the normal URL that is returned in links
3602# to the
3603# OpenStack Compute API. If it is empty (the default), the URLs are
3604# returned
3605# unchanged.
3606#
3607# Possible values:
3608#
3609# * Any string, including an empty string (the default).
3610# (string value)
3611# Deprecated group/name - [DEFAULT]/osapi_compute_link_prefix
3612#compute_link_prefix = <None>
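#
# Illustrative example; the URL below is hypothetical:
#compute_link_prefix = https://cloud.example.com/compute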
3613
3614#
3615# This string is prepended to the normal URL that is returned in links
3616# to
3617# Glance resources. If it is empty (the default), the URLs are
3618# returned
3619# unchanged.
3620#
3621# Possible values:
3622#
3623# * Any string, including an empty string (the default).
3624# (string value)
3625# Deprecated group/name - [DEFAULT]/osapi_glance_link_prefix
3626#glance_link_prefix = <None>
3627
3628# DEPRECATED:
3629# Operators can turn off the ability for a user to take snapshots of
3630# their
3631# instances by setting this option to False. When disabled, any
3632# attempt to
3633# take a snapshot will result in a HTTP 400 response ("Bad Request").
3634# (boolean value)
3635# This option is deprecated for removal since 16.0.0.
3636# Its value may be silently ignored in the future.
3637# Reason: This option disables the createImage server action API in a
3638# non-discoverable way and is thus a barrier to interoperability.
3639# Also, it is not used for other APIs that create snapshots like
3640# shelve or createBackup. Disabling snapshots should be done via
3641# policy if so desired.
3642#allow_instance_snapshots = true
3643
3644# DEPRECATED:
3645# This option is a list of all instance states for which network
3646# address
3647# information should not be returned from the API.
3648#
3649# Possible values:
3650#
3651# A list of strings, where each string is a valid VM state, as
3652# defined in
3653# nova/compute/vm_states.py. As of the Newton release, they are:
3654#
3655# * "active"
3656# * "building"
3657# * "paused"
3658# * "suspended"
3659# * "stopped"
3660# * "rescued"
3661# * "resized"
3662# * "soft-delete"
3663# * "deleted"
3664# * "error"
3665# * "shelved"
3666# * "shelved_offloaded"
3667# (list value)
3668# Deprecated group/name - [DEFAULT]/osapi_hide_server_address_states
3669# This option is deprecated for removal since 17.0.0.
3670# Its value may be silently ignored in the future.
# Reason: This option hides the server address in the server
# representation for the configured server states, which makes the GET
# server API behaviour depend on this config option. As a result, users
# cannot discover the API behaviour on different clouds, which leads to
# interoperability issues.
3676#hide_server_address_states = building
3677
3678# The full path to the fping binary. (string value)
3679#fping_path = /usr/sbin/fping
3680
3681#
3682# When True, the TenantNetworkController will query the Neutron API to
3683# get the
3684# default networks to use.
3685#
3686# Related options:
3687#
3688# * neutron_default_tenant_id
3689# (boolean value)
3690#use_neutron_default_nets = false
3691
3692#
# Tenant ID (also referred to in some places as the 'project ID') to
# use when getting the default networks from the Neutron API.
3696#
3697# Related options:
3698#
3699# * use_neutron_default_nets
3700# (string value)
3701#neutron_default_tenant_id = default
3702
3703#
3704# Enables returning of the instance password by the relevant server
3705# API calls
3706# such as create, rebuild, evacuate, or rescue. If the hypervisor does
3707# not
3708# support password injection, then the password returned will not be
3709# correct,
3710# so if your hypervisor does not support password injection, set this
3711# to False.
3712# (boolean value)
3713#enable_instance_password = true
3714
3715
3716[api_database]
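{# The block below reuses the compute database settings from the pillar,
   overrides the database name to 'nova_api', and falls back to
   compute.cacert_file when SSL is enabled without an explicit CA file. #}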
{%- set _data = {} %}
{%- do _data.update(compute.database) %}
{%- do _data.update({'name': 'nova_api'}) %}
{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': compute.cacert_file}) %}{% endif %}
{%- include "oslo_templates/files/queens/oslo/_database.conf" %}

3723{%- if compute.get('barbican', {}).get('enabled', False) %}
3724{%- set _data = compute.identity %}
3725[barbican]
3726{%- include "oslo_templates/files/queens/castellan/_barbican.conf" %}
3727{%- endif %}
3728
3729[cache]
3730
3731#
3732# From nova.conf
3733#
3734{%- if compute.cache is defined %}
3735backend = oslo_cache.memcache_pool
3736enabled = true
3737memcache_servers={%- for member in compute.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
3738{%- endif %}
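#
# For reference: with two hypothetical cache members 192.0.2.10 and
# 192.0.2.11 defined in the pillar, the template block above renders as:
#
#   backend = oslo_cache.memcache_pool
#   enabled = true
#   memcache_servers=192.0.2.10:11211,192.0.2.11:11211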
3739
3740# Prefix for building the configuration dictionary for the cache
3741# region. This should not need to be changed unless there is another
3742# dogpile.cache region with the same configuration name. (string
3743# value)
3744#config_prefix = cache.oslo
3745
3746# Default TTL, in seconds, for any cached item in the dogpile.cache
3747# region. This applies to any cached method that doesn't have an
3748# explicit cache expiration time defined for it. (integer value)
3749#expiration_time = 600
3750
3751# Cache backend module. For eventlet-based or environments with
3752# hundreds of threaded servers, Memcache with pooling
3753# (oslo_cache.memcache_pool) is recommended. For environments with
3754# less than 100 threaded servers, Memcached (dogpile.cache.memcached)
3755# or Redis (dogpile.cache.redis) is recommended. Test environments
3756# with a single instance of the server can use the
3757# dogpile.cache.memory backend. (string value)
3758# Possible values:
3759# oslo_cache.memcache_pool - <No description provided>
3760# oslo_cache.dict - <No description provided>
3761# oslo_cache.mongo - <No description provided>
3762# oslo_cache.etcd3gw - <No description provided>
3763# dogpile.cache.memcached - <No description provided>
3764# dogpile.cache.pylibmc - <No description provided>
3765# dogpile.cache.bmemcached - <No description provided>
3766# dogpile.cache.dbm - <No description provided>
3767# dogpile.cache.redis - <No description provided>
3768# dogpile.cache.memory - <No description provided>
3769# dogpile.cache.memory_pickle - <No description provided>
3770# dogpile.cache.null - <No description provided>
3771#backend = dogpile.cache.null
3772
3773# Arguments supplied to the backend module. Specify this option once
3774# per argument to be passed to the dogpile.cache backend. Example
3775# format: "<argname>:<value>". (multi valued)
3776#backend_argument =
3777
3778# Proxy classes to import that will affect the way the dogpile.cache
3779# backend functions. See the dogpile.cache documentation on changing-
3780# backend-behavior. (list value)
3781#proxies =
3782
3783# Global toggle for caching. (boolean value)
3784#enabled = false
3785
3786# Extra debugging from the cache backend (cache keys,
3787# get/set/delete/etc calls). This is only really useful if you need to
3788# see the specific cache-backend get/set/delete calls with the
3789# keys/values. Typically this should be left set to false. (boolean
3790# value)
3791#debug_cache_backend = false
3792
3793# Memcache servers in the format of "host:port".
3794# (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
3795# (list value)
3796#memcache_servers = localhost:11211
3797
3798# Number of seconds memcached server is considered dead before it is
3799# tried again. (dogpile.cache.memcache and oslo_cache.memcache_pool
3800# backends only). (integer value)
3801#memcache_dead_retry = 300
3802
3803# Timeout in seconds for every call to a server.
3804# (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
3805# (integer value)
3806#memcache_socket_timeout = 3
3807
3808# Max total number of open connections to every memcached server.
3809# (oslo_cache.memcache_pool backend only). (integer value)
3810#memcache_pool_maxsize = 10
3811
3812# Number of seconds a connection to memcached is held unused in the
3813# pool before it is closed. (oslo_cache.memcache_pool backend only).
3814# (integer value)
3815#memcache_pool_unused_timeout = 60
3816
3817# Number of seconds that an operation will wait to get a memcache
3818# client connection. (integer value)
3819#memcache_pool_connection_get_timeout = 10
3820
3821
3822[cells]
3823#
3824# DEPRECATED: Cells options allow you to use cells v1 functionality in
3825# an
3826# OpenStack deployment.
3827#
3828# Note that the options in this group are only for cells v1
3829# functionality, which
3830# is considered experimental and not recommended for new deployments.
3831# Cells v1
3832# is being replaced with cells v2, which starting in the 15.0.0 Ocata
3833# release is
3834# required and all Nova deployments will be at least a cells v2 cell
3835# of one.
3836#
3837
3838#
3839# From nova.conf
3840#
3841
3842# DEPRECATED:
3843# Enable cell v1 functionality.
3844#
3845# Note that cells v1 is considered experimental and not recommended
3846# for new
# Nova deployments. Cells v1 is being replaced by cells v2; starting
# with the 15.0.0 Ocata release, all Nova deployments are at least a
# cells v2 cell of one. Setting this option, or any other options in
# the [cells]
3852# group, is
3853# not required for cells v2.
3854#
# When this functionality is enabled, it lets you scale an
3856# OpenStack
3857# Compute cloud in a more distributed fashion without having to use
3858# complicated technologies like database and message queue clustering.
3859# Cells are configured as a tree. The top-level cell should have a
3860# host
3861# that runs a nova-api service, but no nova-compute services. Each
3862# child cell should run all of the typical nova-* services in a
3863# regular
3864# Compute cloud except for nova-api. You can think of cells as a
3865# normal
3866# Compute deployment in that each cell has its own database server and
3867# message queue broker.
3868#
3869# Related options:
3870#
3871# * name: A unique cell name must be given when this functionality
3872# is enabled.
3873# * cell_type: Cell type should be defined for all cells.
3874# (boolean value)
3875# This option is deprecated for removal since 16.0.0.
3876# Its value may be silently ignored in the future.
3877# Reason: Cells v1 is being replaced with Cells v2.
3878#enable = false
3879
3880# DEPRECATED:
3881# Name of the current cell.
3882#
# This value must be unique for each cell. The name of a cell is used
# as its ID; leaving this option unset or setting the same name for
# two or more cells may cause unexpected behaviour.
3886#
3887# Related options:
3888#
3889# * enabled: This option is meaningful only when cells service
3890# is enabled
3891# (string value)
3892# This option is deprecated for removal since 16.0.0.
3893# Its value may be silently ignored in the future.
3894# Reason: Cells v1 is being replaced with Cells v2.
3895#name = nova
3896
3897# DEPRECATED:
3898# Cell capabilities.
3899#
3900# List of arbitrary key=value pairs defining capabilities of the
3901# current cell to be sent to the parent cells. These capabilities
3902# are intended to be used in cells scheduler filters/weighers.
3903#
3904# Possible values:
3905#
3906# * key=value pairs list for example;
3907# ``hypervisor=xenserver;kvm,os=linux;windows``
3908# (list value)
3909# This option is deprecated for removal since 16.0.0.
3910# Its value may be silently ignored in the future.
3911# Reason: Cells v1 is being replaced with Cells v2.
3912#capabilities = hypervisor=xenserver;kvm,os=linux;windows
3913
3914# DEPRECATED:
3915# Call timeout.
3916#
3917# Cell messaging module waits for response(s) to be put into the
3918# eventlet queue. This option defines the seconds waited for
3919# response from a call to a cell.
3920#
3921# Possible values:
3922#
3923# * An integer, corresponding to the interval time in seconds.
3924# (integer value)
3925# Minimum value: 0
3926# This option is deprecated for removal since 16.0.0.
3927# Its value may be silently ignored in the future.
3928# Reason: Cells v1 is being replaced with Cells v2.
3929#call_timeout = 60
3930
3931# DEPRECATED:
3932# Reserve percentage
3933#
3934# Percentage of cell capacity to hold in reserve, so the minimum
# amount of free resource is considered to be:
3936#
3937# min_free = total * (reserve_percent / 100.0)
3938#
3939# This option affects both memory and disk utilization.
3940#
3941# The primary purpose of this reserve is to ensure some space is
3942# available for users who want to resize their instance to be larger.
3943# Note that currently once the capacity expands into this reserve
3944# space this option is ignored.
3945#
3946# Possible values:
3947#
3948# * An integer or float, corresponding to the percentage of cell
3949# capacity to
3950# be held in reserve.
3951# (floating point value)
3952# This option is deprecated for removal since 16.0.0.
3953# Its value may be silently ignored in the future.
3954# Reason: Cells v1 is being replaced with Cells v2.
3955#reserve_percent = 10.0
3956
3957# DEPRECATED:
3958# Type of cell.
3959#
3960# When cells feature is enabled the hosts in the OpenStack Compute
3961# cloud are partitioned into groups. Cells are configured as a tree.
3962# The top-level cell's cell_type must be set to ``api``. All other
3963# cells are defined as a ``compute cell`` by default.
3964#
3965# Related option:
3966#
3967# * quota_driver: Disable quota checking for the child cells.
3968# (nova.quota.NoopQuotaDriver)
3969# (string value)
3970# Possible values:
3971# api - <No description provided>
3972# compute - <No description provided>
3973# This option is deprecated for removal since 16.0.0.
3974# Its value may be silently ignored in the future.
3975# Reason: Cells v1 is being replaced with Cells v2.
3976#cell_type = compute
3977
3978# DEPRECATED:
3979# Mute child interval.
3980#
# Number of seconds without a capability and capacity update after
# which a child cell is treated as a mute cell. The mute child cell is
# then weighted so that it is highly recommended to be skipped.
3984#
3985# Possible values:
3986#
3987# * An integer, corresponding to the interval time in seconds.
3988# (integer value)
3989# This option is deprecated for removal since 16.0.0.
3990# Its value may be silently ignored in the future.
3991# Reason: Cells v1 is being replaced with Cells v2.
3992#mute_child_interval = 300
3993
3994# DEPRECATED:
3995# Bandwidth update interval.
3996#
3997# Seconds between bandwidth usage cache updates for cells.
3998#
3999# Possible values:
4000#
4001# * An integer, corresponding to the interval time in seconds.
4002# (integer value)
4003# This option is deprecated for removal since 16.0.0.
4004# Its value may be silently ignored in the future.
4005# Reason: Cells v1 is being replaced with Cells v2.
4006#bandwidth_update_interval = 600
4007
4008# DEPRECATED:
4009# Instance update sync database limit.
4010#
4011# Number of instances to pull from the database at one time for
4012# a sync. If there are more instances to update the results will
4013# be paged through.
4014#
4015# Possible values:
4016#
4017# * An integer, corresponding to a number of instances.
4018# (integer value)
4019# This option is deprecated for removal since 16.0.0.
4020# Its value may be silently ignored in the future.
4021# Reason: Cells v1 is being replaced with Cells v2.
4022#instance_update_sync_database_limit = 100
4023
4024# DEPRECATED:
4025# Mute weight multiplier.
4026#
4027# Multiplier used to weigh mute children. Mute children cells are
4028# recommended to be skipped so their weight is multiplied by this
4029# negative value.
4030#
4031# Possible values:
4032#
# * A negative numeric value
4034# (floating point value)
4035# This option is deprecated for removal since 16.0.0.
4036# Its value may be silently ignored in the future.
4037# Reason: Cells v1 is being replaced with Cells v2.
4038#mute_weight_multiplier = -10000.0
4039
4040# DEPRECATED:
4041# Ram weight multiplier.
4042#
4043# Multiplier used for weighing ram. Negative numbers indicate that
4044# Compute should stack VMs on one host instead of spreading out new
4045# VMs to more hosts in the cell.
4046#
4047# Possible values:
4048#
4049# * Numeric multiplier
4050# (floating point value)
4051# This option is deprecated for removal since 16.0.0.
4052# Its value may be silently ignored in the future.
4053# Reason: Cells v1 is being replaced with Cells v2.
4054#ram_weight_multiplier = 10.0
4055
4056# DEPRECATED:
4057# Offset weight multiplier
4058#
4059# Multiplier used to weigh offset weigher. Cells with higher
4060# weight_offsets in the DB will be preferred. The weight_offset
4061# is a property of a cell stored in the database. It can be used
4062# by a deployer to have scheduling decisions favor or disfavor
4063# cells based on the setting.
4064#
4065# Possible values:
4066#
4067# * Numeric multiplier
4068# (floating point value)
4069# This option is deprecated for removal since 16.0.0.
4070# Its value may be silently ignored in the future.
4071# Reason: Cells v1 is being replaced with Cells v2.
4072#offset_weight_multiplier = 1.0
4073
4074# DEPRECATED:
4075# Instance updated at threshold
4076#
4077# Number of seconds after an instance was updated or deleted to
# continue to update cells. This option lets the cells manager only
# attempt to sync instances that have been updated recently,
# i.e., a threshold of 3600 means to only update instances that
# have been modified in the last hour.
4082#
4083# Possible values:
4084#
4085# * Threshold in seconds
4086#
4087# Related options:
4088#
4089# * This value is used with the ``instance_update_num_instances``
4090# value in a periodic task run.
4091# (integer value)
4092# This option is deprecated for removal since 16.0.0.
4093# Its value may be silently ignored in the future.
4094# Reason: Cells v1 is being replaced with Cells v2.
4095#instance_updated_at_threshold = 3600
4096
4097# DEPRECATED:
4098# Instance update num instances
4099#
4100# On every run of the periodic task, nova cells manager will attempt
4101# to
# sync up to this number of instances. When the
4103# manager gets the list of instances, it shuffles them so that
4104# multiple
4105# nova-cells services do not attempt to sync the same instances in
4106# lockstep.
4107#
4108# Possible values:
4109#
4110# * Positive integer number
4111#
4112# Related options:
4113#
4114# * This value is used with the ``instance_updated_at_threshold``
4115# value in a periodic task run.
4116# (integer value)
4117# This option is deprecated for removal since 16.0.0.
4118# Its value may be silently ignored in the future.
4119# Reason: Cells v1 is being replaced with Cells v2.
4120#instance_update_num_instances = 1
4121
4122# DEPRECATED:
4123# Maximum hop count
4124#
# When processing a targeted message, if the local cell is not the
# target, a route is defined between neighbouring cells, and the
# message is processed across the whole routing path. This option
# defines the maximum hop count until the target is reached.
4129#
4130# Possible values:
4131#
4132# * Positive integer value
4133# (integer value)
4134# This option is deprecated for removal since 16.0.0.
4135# Its value may be silently ignored in the future.
4136# Reason: Cells v1 is being replaced with Cells v2.
4137#max_hop_count = 10
4138
4139# DEPRECATED:
4140# Cells scheduler.
4141#
4142# The class of the driver used by the cells scheduler. This should be
4143# the full Python path to the class to be used. If nothing is
4144# specified
4145# in this option, the CellsScheduler is used.
4146# (string value)
4147# This option is deprecated for removal since 16.0.0.
4148# Its value may be silently ignored in the future.
4149# Reason: Cells v1 is being replaced with Cells v2.
4150#scheduler = nova.cells.scheduler.CellsScheduler
4151
4152# DEPRECATED:
4153# RPC driver queue base.
4154#
4155# When sending a message to another cell by JSON-ifying the message
4156# and making an RPC cast to 'process_message', a base queue is used.
4157# This option defines the base queue name to be used when
4158# communicating
4159# between cells. Various topics by message type will be appended to
4160# this.
4161#
4162# Possible values:
4163#
4164# * The base queue name to be used when communicating between cells.
4165# (string value)
4166# This option is deprecated for removal since 16.0.0.
4167# Its value may be silently ignored in the future.
4168# Reason: Cells v1 is being replaced with Cells v2.
4169#rpc_driver_queue_base = cells.intercell
4170
4171# DEPRECATED:
4172# Scheduler filter classes.
4173#
4174# Filter classes the cells scheduler should use. An entry of
4175# "nova.cells.filters.all_filters" maps to all cells filters
4176# included with nova. As of the Mitaka release the following
4177# filter classes are available:
4178#
4179# Different cell filter: A scheduler hint of 'different_cell'
4180# with a value of a full cell name may be specified to route
4181# a build away from a particular cell.
4182#
4183# Image properties filter: Image metadata named
4184# 'hypervisor_version_requires' with a version specification
4185# may be specified to ensure the build goes to a cell which
4186# has hypervisors of the required version. If either the version
4187# requirement on the image or the hypervisor capability of the
4188# cell is not present, this filter returns without filtering out
4189# the cells.
4190#
4191# Target cell filter: A scheduler hint of 'target_cell' with a
4192# value of a full cell name may be specified to route a build to
4193# a particular cell. No error handling is done as there's no way
4194# to know whether the full path is a valid.
4195#
4196# As an admin user, you can also add a filter that directs builds
4197# to a particular cell.
4198#
4199# (list value)
4200# This option is deprecated for removal since 16.0.0.
4201# Its value may be silently ignored in the future.
4202# Reason: Cells v1 is being replaced with Cells v2.
4203#scheduler_filter_classes = nova.cells.filters.all_filters
4204
4205# DEPRECATED:
4206# Scheduler weight classes.
4207#
4208# Weigher classes the cells scheduler should use. An entry of
4209# "nova.cells.weights.all_weighers" maps to all cell weighers
4210# included with nova. As of the Mitaka release the following
4211# weight classes are available:
4212#
4213# mute_child: Downgrades the likelihood of child cells being
4214# chosen for scheduling requests, which haven't sent capacity
4215# or capability updates in a while. Options include
4216# mute_weight_multiplier (multiplier for mute children; value
4217# should be negative).
4218#
4219# ram_by_instance_type: Select cells with the most RAM capacity
4220# for the instance type being requested. Because higher weights
4221# win, Compute returns the number of available units for the
# instance type requested. The ram_weight_multiplier option defaults
# to 10.0, which scales the weight by a factor of 10. Use a negative
4224# number to stack VMs on one host instead of spreading out new VMs
4225# to more hosts in the cell.
4226#
4227# weight_offset: Allows modifying the database to weight a particular
4228# cell. The highest weight will be the first cell to be scheduled for
4229# launching an instance. When the weight_offset of a cell is set to 0,
4230# it is unlikely to be picked but it could be picked if other cells
4231# have a lower weight, like if they're full. And when the
4232# weight_offset
4233# is set to a very high value (for example, '999999999999999'), it is
4234# likely to be picked if another cell do not have a higher weight.
4235# (list value)
4236# This option is deprecated for removal since 16.0.0.
4237# Its value may be silently ignored in the future.
4238# Reason: Cells v1 is being replaced with Cells v2.
4239#scheduler_weight_classes = nova.cells.weights.all_weighers
4240
4241# DEPRECATED:
4242# Scheduler retries.
4243#
4244# How many retries when no cells are available. Specifies how many
4245# times the scheduler tries to launch a new instance when no cells
4246# are available.
4247#
4248# Possible values:
4249#
4250# * Positive integer value
4251#
4252# Related options:
4253#
4254# * This value is used with the ``scheduler_retry_delay`` value
4255# while retrying to find a suitable cell.
4256# (integer value)
4257# This option is deprecated for removal since 16.0.0.
4258# Its value may be silently ignored in the future.
4259# Reason: Cells v1 is being replaced with Cells v2.
4260#scheduler_retries = 10
4261
4262# DEPRECATED:
4263# Scheduler retry delay.
4264#
4265# Specifies the delay (in seconds) between scheduling retries when no
4266# cell can be found to place the new instance on. When the instance
4267# could not be scheduled to a cell after ``scheduler_retries`` in
4268# combination with ``scheduler_retry_delay``, then the scheduling
4269# of the instance failed.
4270#
4271# Possible values:
4272#
4273# * Time in seconds.
4274#
4275# Related options:
4276#
4277# * This value is used with the ``scheduler_retries`` value
4278# while retrying to find a suitable cell.
4279# (integer value)
4280# This option is deprecated for removal since 16.0.0.
4281# Its value may be silently ignored in the future.
4282# Reason: Cells v1 is being replaced with Cells v2.
4283#scheduler_retry_delay = 2
4284
4285# DEPRECATED:
4286# DB check interval.
4287#
4288# Cell state manager updates cell status for all cells from the DB
# only after this particular interval time has passed. Otherwise cached
# statuses are used. If this value is 0 or negative, all cell statuses
# are updated from the DB whenever a state is needed.
4292#
4293# Possible values:
4294#
4295# * Interval time, in seconds.
4296#
4297# (integer value)
4298# This option is deprecated for removal since 16.0.0.
4299# Its value may be silently ignored in the future.
4300# Reason: Cells v1 is being replaced with Cells v2.
4301#db_check_interval = 60
4302
4303# DEPRECATED:
4304# Optional cells configuration.
4305#
4306# Configuration file from which to read cells configuration. If given,
4307# overrides reading cells from the database.
4308#
4309# Cells store all inter-cell communication data, including user names
4310# and passwords, in the database. Because the cells data is not
4311# updated
4312# very frequently, use this option to specify a JSON file to store
4313# cells data. With this configuration, the database is no longer
4314# consulted when reloading the cells data. The file must have columns
4315# present in the Cell model (excluding common database fields and the
4316# id column). You must specify the queue connection information
4317# through
4318# a transport_url field, instead of username, password, and so on.
4319#
4320# The transport_url has the following form:
4321# rabbit://USERNAME:PASSWORD@HOSTNAME:PORT/VIRTUAL_HOST
4322#
4323# Possible values:
4324#
4325# The scheme can be either qpid or rabbit, the following sample shows
4326# this optional configuration:
4327#
4328# {
4329# "parent": {
4330# "name": "parent",
4331# "api_url": "http://api.example.com:8774",
4332# "transport_url": "rabbit://rabbit.example.com",
4333# "weight_offset": 0.0,
4334# "weight_scale": 1.0,
4335# "is_parent": true
4336# },
4337# "cell1": {
4338# "name": "cell1",
4339# "api_url": "http://api.example.com:8774",
4340# "transport_url": "rabbit://rabbit1.example.com",
4341# "weight_offset": 0.0,
4342# "weight_scale": 1.0,
4343# "is_parent": false
4344# },
4345# "cell2": {
4346# "name": "cell2",
4347# "api_url": "http://api.example.com:8774",
4348# "transport_url": "rabbit://rabbit2.example.com",
4349# "weight_offset": 0.0,
4350# "weight_scale": 1.0,
4351# "is_parent": false
4352# }
4353# }
4354#
4355# (string value)
4356# This option is deprecated for removal since 16.0.0.
4357# Its value may be silently ignored in the future.
4358# Reason: Cells v1 is being replaced with Cells v2.
4359#cells_config = <None>
4360
4361
4362[cinder]
4363
4364#
4365# From nova.conf
#

4368#
4369# Info to match when looking for cinder in the service catalog.
4370#
4371# Possible values:
4372#
4373# * Format is separated values of the form:
4374# <service_type>:<service_name>:<endpoint_type>
4375#
4376# Note: Nova does not support the Cinder v2 API since the Nova 17.0.0
4377# Queens
4378# release.
4379#
4380# Related options:
4381#
4382# * endpoint_template - Setting this option will override catalog_info
4383# (string value)
4384#catalog_info = volumev3:cinderv3:publicURL
catalog_info = volumev3:cinderv3:internalURL

#
4388# If this option is set then it will override service catalog lookup
4389# with
4390# this template for cinder endpoint
4391#
4392# Possible values:
4393#
4394# * URL for cinder endpoint API
4395# e.g. http://localhost:8776/v3/%(project_id)s
4396#
4397# Note: Nova does not support the Cinder v2 API since the Nova 17.0.0
4398# Queens
4399# release.
4400#
4401# Related options:
4402#
4403# * catalog_info - If endpoint_template is not set, catalog_info will
4404# be used.
4405# (string value)
4406#endpoint_template = <None>
4407
4408#
4409# Region name of this node. This is used when picking the URL in the
4410# service
4411# catalog.
4412#
4413# Possible values:
4414#
4415# * Any string representing region name
4416# (string value)
4417#os_region_name = <None>
os_region_name = {{ compute.identity.region }}

#
4421# Number of times cinderclient should retry on any failed http call.
4422# 0 means connection is attempted only once. Setting it to any
4423# positive integer
4424# means that on failure connection is retried that many times e.g.
4425# setting it
4426# to 3 means total attempts to connect will be 4.
4427#
4428# Possible values:
4429#
4430# * Any integer value. 0 means connection is attempted only once
4431# (integer value)
4432# Minimum value: 0
4433#http_retries = 3
4434
4435#
4436# Allow attach between instance and volume in different availability
4437# zones.
4438#
4439# If False, volumes attached to an instance must be in the same
4440# availability
4441# zone in Cinder as the instance availability zone in Nova.
4442# This also means care should be taken when booting an instance from a
4443# volume
4444# where source is not "volume" because Nova will attempt to create a
4445# volume using
4446# the same availability zone as what is assigned to the instance.
4447# If that AZ is not in Cinder (or
4448# allow_availability_zone_fallback=False in
4449# cinder.conf), the volume create request will fail and the instance
4450# will fail
4451# the build request.
4452# By default there is no availability zone restriction on volume
4453# attach.
4454# (boolean value)
4455#cross_az_attach = true
4456{%- if compute.cross_az_attach is defined %}
4457cross_az_attach={{ compute.cross_az_attach }}
4458{%- endif %}
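#
# For reference: when the pillar defines a boolean compute:cross_az_attach
# value (hypothetical example: false), the block above renders as:
#
#   cross_az_attach=False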
4459
{%- set _data = compute.get('cinder', compute.get('identity', {})) %}
{%- set auth_type = _data.get('auth_type', 'password') %}
{%- include "oslo_templates/files/queens/keystoneauth/_type_" + auth_type + ".conf" %}

4464
4465[compute]
4466
4467#
4468# From nova.conf
4469#
4470
4471#
4472# Number of consecutive failed builds that result in disabling a
4473# compute service.
4474#
4475# This option will cause nova-compute to set itself to a disabled
4476# state
4477# if a certain number of consecutive build failures occur. This will
4478# prevent the scheduler from continuing to send builds to a compute
4479# node that is
4480# consistently failing. Note that all failures qualify and count
4481# towards this
4482# score, including reschedules that may have been due to racy
4483# scheduler behavior.
4484# Since the failures must be consecutive, it is unlikely that
4485# occasional expected
4486# reschedules will actually disable a compute node.
4487#
4488# Possible values:
4489#
4490# * Any positive integer representing a build failure count.
4491# * Zero to never auto-disable.
4492# (integer value)
4493#consecutive_build_service_disable_threshold = 10
4494{%- if compute.get('compute', {}).consecutive_build_service_disable_threshold is defined %}
4495consecutive_build_service_disable_threshold = {{ compute.compute.consecutive_build_service_disable_threshold }}
4496{%- endif %}
4497
4498#
4499# Interval for updating nova-compute-side cache of the compute node
4500# resource
4501# provider's aggregates and traits info.
4502#
4503# This option specifies the number of seconds between attempts to
4504# update a
4505# provider's aggregates and traits information in the local cache of
4506# the compute
4507# node.
4508#
4509# Possible values:
4510#
4511# * Any positive integer in seconds.
4512# (integer value)
4513# Minimum value: 1
4514#resource_provider_association_refresh = 300
4515
4516
4517[conductor]
4518#
4519# Options under this group are used to define Conductor's
4520# communication,
# which manager should act as a proxy between computes and the
# database,
4523# and finally, how many worker processes will be used.
4524
4525#
4526# From nova.conf
4527#
4528
4529# DEPRECATED:
4530# Topic exchange name on which conductor nodes listen.
4531# (string value)
4532# This option is deprecated for removal since 15.0.0.
4533# Its value may be silently ignored in the future.
4534# Reason:
4535# There is no need to let users choose the RPC topic for all services
4536# - there
4537# is little gain from this. Furthermore, it makes it really easy to
4538# break Nova
4539# by using this option.
4540#topic = conductor
4541
4542#
4543# Number of workers for OpenStack Conductor service. The default will
4544# be the
4545# number of CPUs available.
4546# (integer value)
4547#workers = <None>
4548
4549
4550[console]
4551#
# Options under this group allow you to tune the configuration of the
# console proxy
# service.
#
# Note: the configuration of every compute service contains a
# ``console_host`` option,
# which allows selecting the console proxy service to connect to.
4559
4560#
4561# From nova.conf
4562#
4563
4564#
# Adds a list of allowed origins to the console websocket proxy to allow
# connections from other origin hostnames.
# The websocket proxy matches the host header with the origin header to
# prevent cross-site requests. This list specifies the origin header
# values, other than the host, that are allowed.
4570#
4571# Possible values:
4572#
# * A list where each element is an allowed origin hostname, else an
4574# empty list
4575# (list value)
4576# Deprecated group/name - [DEFAULT]/console_allowed_origins
4577#allowed_origins =
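#
# Illustrative example; the hostnames below are hypothetical:
#allowed_origins = novnc.example.com,console.example.com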
4578
4579
4580[consoleauth]
4581
4582#
4583# From nova.conf
4584#
4585
4586#
4587# The lifetime of a console auth token (in seconds).
4588#
4589# A console auth token is used in authorizing console access for a
4590# user.
4591# Once the auth token time to live count has elapsed, the token is
4592# considered expired. Expired tokens are then deleted.
4593# (integer value)
4594# Minimum value: 0
4595# Deprecated group/name - [DEFAULT]/console_token_ttl
4596#token_ttl = 600
4597
4598
4599[crypto]
4600
4601#
4602# From nova.conf
4603#
4604
4605#
4606# Filename of root CA (Certificate Authority). This is a container
4607# format
4608# and includes root certificates.
4609#
4610# Possible values:
4611#
4612# * Any file name containing root CA, cacert.pem is default
4613#
4614# Related options:
4615#
4616# * ca_path
4617# (string value)
4618#ca_file = cacert.pem
4619
4620#
4621# Filename of a private key.
4622#
4623# Related options:
4624#
4625# * keys_path
4626# (string value)
4627#key_file = private/cakey.pem
4628
4629#
4630# Filename of root Certificate Revocation List (CRL). This is a list
4631# of
4632# certificates that have been revoked, and therefore, entities
4633# presenting
4634# those (revoked) certificates should no longer be trusted.
4635#
4636# Related options:
4637#
4638# * ca_path
4639# (string value)
4640#crl_file = crl.pem
4641
4642#
4643# Directory path where keys are located.
4644#
4645# Related options:
4646#
4647# * key_file
4648# (string value)
4649#keys_path = $state_path/keys
4650
4651#
4652# Directory path where root CA is located.
4653#
4654# Related options:
4655#
4656# * ca_file
4657# (string value)
4658#ca_path = $state_path/CA
4659
4660# Option to enable/disable use of CA for each project. (boolean value)
4661#use_project_ca = false
4662
4663#
4664# Subject for certificate for users, %s for
4665# project, user, timestamp
4666# (string value)
4667#user_cert_subject = /C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s
4668
4669#
4670# Subject for certificate for projects, %s for
4671# project, timestamp
4672# (string value)
4673#project_cert_subject = /C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s
4674
4675
4676[devices]
4677
4678#
4679# From nova.conf
4680#
4681
4682#
4683# A list of the vGPU types enabled in the compute node.
4684#
# Some pGPUs (e.g. NVIDIA GRID K1) support different vGPU types. Users
4686# can use
4687# this option to specify a list of enabled vGPU types that may be
4688# assigned to a
4689# guest instance. But please note that Nova only supports a single
4690# type in the
4691# Queens release. If more than one vGPU type is specified (as a comma-
4692# separated
# list), only the first one will be used. For example:
4695# [devices]
4696# enabled_vgpu_types = GRID K100,Intel GVT-g,MxGPU.2,nvidia-11
4697# (list value)
4698#enabled_vgpu_types =
4699
4700
4701[ephemeral_storage_encryption]
4702
4703#
4704# From nova.conf
4705#
4706
4707#
4708# Enables/disables LVM ephemeral storage encryption.
4709# (boolean value)
4710#enabled = false
4711
4712#
4713# Cipher-mode string to be used.
4714#
4715# The cipher and mode to be used to encrypt ephemeral storage. The set
4716# of
4717# cipher-mode combinations available depends on kernel support.
4718# According
4719# to the dm-crypt documentation, the cipher is expected to be in the
4720# format:
4721# "<cipher>-<chainmode>-<ivmode>".
4722#
4723# Possible values:
4724#
4725# * Any crypto option listed in ``/proc/crypto``.
4726# (string value)
4727#cipher = aes-xts-plain64
4728
4729#
4730# Encryption key length in bits.
4731#
4732# The bit length of the encryption key to be used to encrypt ephemeral
4733# storage.
# In XTS mode only half of the bits are used for the encryption key.
4735# (integer value)
4736# Minimum value: 1
4737#key_size = 512
4738
4739
4740[filter_scheduler]
4741
4742#
4743# From nova.conf
4744#
4745
4746#
4747# Size of subset of best hosts selected by scheduler.
4748#
4749# New instances will be scheduled on a host chosen randomly from a
4750# subset of the
4751# N best hosts, where N is the value set by this option.
4752#
4753# Setting this to a value greater than 1 will reduce the chance that
4754# multiple
4755# scheduler processes handling similar requests will select the same
4756# host,
4757# creating a potential race condition. By selecting a host randomly
4758# from the N
4759# hosts that best fit the request, the chance of a conflict is
4760# reduced. However,
4761# the higher you set this value, the less optimal the chosen host may
4762# be for a
4763# given request.
4764#
4765# This option is only used by the FilterScheduler and its subclasses;
4766# if you use
4767# a different scheduler, this option has no effect.
4768#
4769# Possible values:
4770#
4771# * An integer, where the integer corresponds to the size of a host
4772# subset. Any
4773# integer is valid, although any value less than 1 will be treated
4774# as 1
4775# (integer value)
4776# Minimum value: 1
4777# Deprecated group/name - [DEFAULT]/scheduler_host_subset_size
4778#host_subset_size = 1
4779
4780#
4781# The number of instances that can be actively performing IO on a
4782# host.
4783#
# Instances performing IO include those in the following states:
4785# build, resize,
4786# snapshot, migrate, rescue, unshelve.
4787#
4788# This option is only used by the FilterScheduler and its subclasses;
4789# if you use
4790# a different scheduler, this option has no effect. Also note that
4791# this setting
4792# only affects scheduling if the 'io_ops_filter' filter is enabled.
4793#
4794# Possible values:
4795#
4796# * An integer, where the integer corresponds to the max number of
4797# instances
4798# that can be actively performing IO on any given host.
4799# (integer value)
4800#max_io_ops_per_host = 8
4801
4802#
# Maximum number of instances that can be active on a host.
4804#
4805# If you need to limit the number of instances on any given host, set
4806# this option
4807# to the maximum number of instances you want to allow. The
4808# num_instances_filter
4809# will reject any host that has at least as many instances as this
4810# option's
4811# value.
4812#
4813# This option is only used by the FilterScheduler and its subclasses;
4814# if you use
4815# a different scheduler, this option has no effect. Also note that
4816# this setting
4817# only affects scheduling if the 'num_instances_filter' filter is
4818# enabled.
4819#
4820# Possible values:
4821#
4822# * An integer, where the integer corresponds to the max instances
4823# that can be
4824# scheduled on a host.
4825# (integer value)
4826# Minimum value: 1
4827#max_instances_per_host = 50
4828
4829#
4830# Enable querying of individual hosts for instance information.
4831#
4832# The scheduler may need information about the instances on a host in
4833# order to
4834# evaluate its filters and weighers. The most common need for this
4835# information is
4836# for the (anti-)affinity filters, which need to choose a host based
4837# on the
4838# instances already running on a host.
4839#
4840# If the configured filters and weighers do not need this information,
4841# disabling
4842# this option will improve performance. It may also be disabled when
4843# the tracking
4844# overhead proves too heavy, although this will cause classes
4845# requiring host
4846# usage data to query the database on each request instead.
4847#
4848# This option is only used by the FilterScheduler and its subclasses;
4849# if you use
4850# a different scheduler, this option has no effect.
4851#
4852# NOTE: In a multi-cell (v2) setup where the cell MQ is separated from
4853# the
4854# top-level, computes cannot directly communicate with the scheduler.
4855# Thus,
4856# this option cannot be enabled in that scenario. See also the
4857# [workarounds]/disable_group_policy_check_upcall option.
4858# (boolean value)
4859# Deprecated group/name - [DEFAULT]/scheduler_tracks_instance_changes
4860#track_instance_changes = true
4861
4862#
4863# Filters that the scheduler can use.
4864#
4865# An unordered list of the filter classes the nova scheduler may
4866# apply. Only the
4867# filters specified in the 'enabled_filters' option will be used, but
4868# any filter appearing in that option must also be included in this
4869# list.
4870#
4871# By default, this is set to all filters that are included with nova.
4872#
4873# This option is only used by the FilterScheduler and its subclasses;
4874# if you use
4875# a different scheduler, this option has no effect.
4876#
4877# Possible values:
4878#
4879# * A list of zero or more strings, where each string corresponds to
4880# the name of
4881# a filter that may be used for selecting a host
4882#
4883# Related options:
4884#
4885# * enabled_filters
4886# (multi valued)
4887# Deprecated group/name - [DEFAULT]/scheduler_available_filters
4888#available_filters = nova.scheduler.filters.all_filters
4889
4890#
4891# Filters that the scheduler will use.
4892#
4893# An ordered list of filter class names that will be used for
4894# filtering
4895# hosts. These filters will be applied in the order they are listed so
4896# place your most restrictive filters first to make the filtering
4897# process more
4898# efficient.
4899#
4900# This option is only used by the FilterScheduler and its subclasses;
4901# if you use
4902# a different scheduler, this option has no effect.
4903#
4904# Possible values:
4905#
4906# * A list of zero or more strings, where each string corresponds to
4907# the name of
4908# a filter to be used for selecting a host
4909#
4910# Related options:
4911#
4912# * All of the filters in this option *must* be present in the
4913# 'scheduler_available_filters' option, or a
4914# SchedulerHostFilterNotFound
4915# exception will be raised.
4916# (list value)
4917# Deprecated group/name - [DEFAULT]/scheduler_default_filters
4918#enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
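#
# Illustrative (non-default) example extending the default filter list
# with the NUMA topology filter; adjust to your deployment before
# uncommenting:
#enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,NUMATopologyFilter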
4919
4920# DEPRECATED:
4921# Filters used for filtering baremetal hosts.
4922#
4923# Filters are applied in order, so place your most restrictive filters
4924# first to
4925# make the filtering process more efficient.
4926#
4927# This option is only used by the FilterScheduler and its subclasses;
4928# if you use
4929# a different scheduler, this option has no effect.
4930#
4931# Possible values:
4932#
4933# * A list of zero or more strings, where each string corresponds to
4934# the name of
4935# a filter to be used for selecting a baremetal host
4936#
4937# Related options:
4938#
4939# * If the 'scheduler_use_baremetal_filters' option is False, this
4940# option has
4941# no effect.
4942# (list value)
4943# Deprecated group/name - [DEFAULT]/baremetal_scheduler_default_filters
4944# This option is deprecated for removal.
4945# Its value may be silently ignored in the future.
4946# Reason:
4947# These filters were used to overcome some of the baremetal scheduling
4948# limitations in Nova prior to the use of the Placement API. Now
4949# scheduling will
4950# use the custom resource class defined for each baremetal node to
4951# make its
4952# selection.
4953#baremetal_enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ExactRamFilter,ExactDiskFilter,ExactCoreFilter
4954
4955# DEPRECATED:
4956# Enable baremetal filters.
4957#
4958# Set this to True to tell the nova scheduler that it should use the
4959# filters
4960# specified in the 'baremetal_enabled_filters' option. If you are not
4961# scheduling baremetal nodes, leave this at the default setting of
4962# False.
4963#
4964# This option is only used by the FilterScheduler and its subclasses;
4965# if you use
4966# a different scheduler, this option has no effect.
4967#
4968# Related options:
4969#
4970# * If this option is set to True, then the filters specified in the
4971# 'baremetal_enabled_filters' are used instead of the filters
4972# specified in 'enabled_filters'.
4973# (boolean value)
4974# Deprecated group/name - [DEFAULT]/scheduler_use_baremetal_filters
4975# This option is deprecated for removal.
4976# Its value may be silently ignored in the future.
4977# Reason:
4978# These filters were used to overcome some of the baremetal scheduling
4979# limitations in Nova prior to the use of the Placement API. Now
4980# scheduling will
4981# use the custom resource class defined for each baremetal node to
4982# make its
4983# selection.
4984#use_baremetal_filters = false
4985
4986#
4987# Weighers that the scheduler will use.
4988#
4989# Only hosts which pass the filters are weighed. The weight for any
4990# host starts
4991# at 0, and the weighers order these hosts by adding to or subtracting
4992# from the
4993# weight assigned by the previous weigher. Weights may become
4994# negative. An
4995# instance will be scheduled to one of the N most-weighted hosts,
4996# where N is
4997# 'scheduler_host_subset_size'.
4998#
4999# By default, this is set to all weighers that are included with Nova.
5000#
5001# This option is only used by the FilterScheduler and its subclasses;
5002# if you use
5003# a different scheduler, this option has no effect.
5004#
5005# Possible values:
5006#
5007# * A list of zero or more strings, where each string corresponds to
5008# the name of
5009# a weigher that will be used for selecting a host
5010# (list value)
5011# Deprecated group/name - [DEFAULT]/scheduler_weight_classes
5012#weight_classes = nova.scheduler.weights.all_weighers
5013
5014#
# RAM weight multiplier ratio.
5016#
5017# This option determines how hosts with more or less available RAM are
5018# weighed. A
5019# positive value will result in the scheduler preferring hosts with
5020# more
5021# available RAM, and a negative number will result in the scheduler
5022# preferring
5023# hosts with less available RAM. Another way to look at it is that
5024# positive
5025# values for this option will tend to spread instances across many
5026# hosts, while
5027# negative values will tend to fill up (stack) hosts as much as
5028# possible before
5029# scheduling to a less-used host. The absolute value, whether positive
5030# or
5031# negative, controls how strong the RAM weigher is relative to other
5032# weighers.
5033#
5034# This option is only used by the FilterScheduler and its subclasses;
5035# if you use
5036# a different scheduler, this option has no effect. Also note that
5037# this setting
5038# only affects scheduling if the 'ram' weigher is enabled.
5039#
5040# Possible values:
5041#
5042# * An integer or float value, where the value corresponds to the
# multiplier
5044# ratio for this weigher.
5045# (floating point value)
5046#ram_weight_multiplier = 1.0
5047
5048#
# Disk weight multiplier ratio.
5050#
5051# Multiplier used for weighing free disk space. Negative numbers mean
5052# to
5053# stack vs spread.
5054#
5055# This option is only used by the FilterScheduler and its subclasses;
5056# if you use
5057# a different scheduler, this option has no effect. Also note that
5058# this setting
5059# only affects scheduling if the 'disk' weigher is enabled.
5060#
5061# Possible values:
5062#
5063# * An integer or float value, where the value corresponds to the
# multiplier
5065# ratio for this weigher.
5066# (floating point value)
5067#disk_weight_multiplier = 1.0
5068
5069#
# IO operations weight multiplier ratio.
5071#
5072# This option determines how hosts with differing workloads are
5073# weighed. Negative
5074# values, such as the default, will result in the scheduler preferring
5075# hosts with
5076# lighter workloads whereas positive values will prefer hosts with
5077# heavier
5078# workloads. Another way to look at it is that positive values for
5079# this option
5080# will tend to schedule instances onto hosts that are already busy,
5081# while
5082# negative values will tend to distribute the workload across more
5083# hosts. The
5084# absolute value, whether positive or negative, controls how strong
5085# the io_ops
5086# weigher is relative to other weighers.
5087#
5088# This option is only used by the FilterScheduler and its subclasses;
5089# if you use
5090# a different scheduler, this option has no effect. Also note that
5091# this setting
5092# only affects scheduling if the 'io_ops' weigher is enabled.
5093#
5094# Possible values:
5095#
5096# * An integer or float value, where the value corresponds to the
# multiplier
5098# ratio for this weigher.
5099# (floating point value)
5100#io_ops_weight_multiplier = -1.0
5101
5102#
5103# PCI device affinity weight multiplier.
5104#
5105# The PCI device affinity weighter computes a weighting based on the
5106# number of
5107# PCI devices on the host and the number of PCI devices requested by
5108# the
5109# instance. The ``NUMATopologyFilter`` filter must be enabled for this
5110# to have
5111# any significance. For more information, refer to the filter
5112# documentation:
5113#
5114# https://docs.openstack.org/nova/latest/user/filter-
5115# scheduler.html
5116#
5117# Possible values:
5118#
5119# * A positive integer or float value, where the value corresponds to
5120# the
5121# multiplier ratio for this weigher.
5122# (floating point value)
5123# Minimum value: 0
5124#pci_weight_multiplier = 1.0
5125
5126#
5127# Multiplier used for weighing hosts for group soft-affinity.
5128#
5129# Possible values:
5130#
5131# * An integer or float value, where the value corresponds to weight
5132# multiplier
# for hosts with group soft affinity. Only positive values are
5134# meaningful, as
5135# negative values would make this behave as a soft anti-affinity
5136# weigher.
5137# (floating point value)
5138#soft_affinity_weight_multiplier = 1.0
5139
5140#
5141# Multiplier used for weighing hosts for group soft-anti-affinity.
5142#
5143# Possible values:
5144#
5145# * An integer or float value, where the value corresponds to weight
5146# multiplier
# for hosts with group soft anti-affinity. Only positive values are
5148# meaningful, as negative values would make this behave as a soft
5149# affinity
5150# weigher.
5151# (floating point value)
5152#soft_anti_affinity_weight_multiplier = 1.0
5153
5154#
5155# Enable spreading the instances between hosts with the same best
5156# weight.
5157#
5158# Enabling it is beneficial for cases when host_subset_size is 1
# (default), but there is a large number of hosts with the same maximal
5160# weight.
5161# This scenario is common in Ironic deployments where there are
5162# typically many
5163# baremetal nodes with identical weights returned to the scheduler.
# In such a case, enabling this option will reduce contention and chances
5165# for
5166# rescheduling events.
5167# At the same time it will make the instance packing (even in
5168# unweighted case)
5169# less dense.
5170# (boolean value)
5171#shuffle_best_same_weighed_hosts = false
5172
5173#
5174# The default architecture to be used when using the image properties
5175# filter.
5176#
5177# When using the ImagePropertiesFilter, it is possible that you want
5178# to define
5179# a default architecture to make the user experience easier and avoid
5180# having
5181# something like x86_64 images landing on aarch64 compute nodes
5182# because the
5183# user did not specify the 'hw_architecture' property in Glance.
5184#
5185# Possible values:
5186#
5187# * CPU Architectures such as x86_64, aarch64, s390x.
5188# (string value)
5189# Possible values:
5190# alpha - <No description provided>
5191# armv6 - <No description provided>
5192# armv7l - <No description provided>
5193# armv7b - <No description provided>
5194# aarch64 - <No description provided>
5195# cris - <No description provided>
5196# i686 - <No description provided>
5197# ia64 - <No description provided>
5198# lm32 - <No description provided>
5199# m68k - <No description provided>
5200# microblaze - <No description provided>
5201# microblazeel - <No description provided>
5202# mips - <No description provided>
5203# mipsel - <No description provided>
5204# mips64 - <No description provided>
5205# mips64el - <No description provided>
5206# openrisc - <No description provided>
5207# parisc - <No description provided>
5208# parisc64 - <No description provided>
5209# ppc - <No description provided>
5210# ppcle - <No description provided>
5211# ppc64 - <No description provided>
5212# ppc64le - <No description provided>
5213# ppcemb - <No description provided>
5214# s390 - <No description provided>
5215# s390x - <No description provided>
5216# sh4 - <No description provided>
5217# sh4eb - <No description provided>
5218# sparc - <No description provided>
5219# sparc64 - <No description provided>
5220# unicore32 - <No description provided>
5221# x86_64 - <No description provided>
5222# xtensa - <No description provided>
5223# xtensaeb - <No description provided>
5224#image_properties_default_architecture = <None>
5225
5226#
5227# List of UUIDs for images that can only be run on certain hosts.
5228#
5229# If there is a need to restrict some images to only run on certain
5230# designated
5231# hosts, list those image UUIDs here.
5232#
5233# This option is only used by the FilterScheduler and its subclasses;
5234# if you use
5235# a different scheduler, this option has no effect. Also note that
5236# this setting
5237# only affects scheduling if the 'IsolatedHostsFilter' filter is
5238# enabled.
5239#
5240# Possible values:
5241#
5242# * A list of UUID strings, where each string corresponds to the UUID
5243# of an
5244# image
5245#
5246# Related options:
5247#
5248# * scheduler/isolated_hosts
5249# * scheduler/restrict_isolated_hosts_to_isolated_images
5250# (list value)
5251#isolated_images =
5252
5253#
5254# List of hosts that can only run certain images.
5255#
5256# If there is a need to restrict some images to only run on certain
5257# designated
5258# hosts, list those host names here.
5259#
5260# This option is only used by the FilterScheduler and its subclasses;
5261# if you use
5262# a different scheduler, this option has no effect. Also note that
5263# this setting
5264# only affects scheduling if the 'IsolatedHostsFilter' filter is
5265# enabled.
5266#
5267# Possible values:
5268#
5269# * A list of strings, where each string corresponds to the name of a
5270# host
5271#
5272# Related options:
5273#
5274# * scheduler/isolated_images
5275# * scheduler/restrict_isolated_hosts_to_isolated_images
5276# (list value)
5277#isolated_hosts =
5278
5279#
5280# Prevent non-isolated images from being built on isolated hosts.
5281#
5282# This option is only used by the FilterScheduler and its subclasses;
5283# if you use
5284# a different scheduler, this option has no effect. Also note that
5285# this setting
5286# only affects scheduling if the 'IsolatedHostsFilter' filter is
5287# enabled. Even
5288# then, this option doesn't affect the behavior of requests for
5289# isolated images,
5290# which will *always* be restricted to isolated hosts.
5291#
5292# Related options:
5293#
5294# * scheduler/isolated_images
5295# * scheduler/isolated_hosts
5296# (boolean value)
5297#restrict_isolated_hosts_to_isolated_images = true
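#
# Illustrative example only (the UUID and host names below are
# hypothetical): with the 'IsolatedHostsFilter' enabled, a configuration
# such as
#   isolated_images = 3cf51b18-8b4e-4e2f-b32e-3e5d93e54a6d
#   isolated_hosts = bm-host-01,bm-host-02
#   restrict_isolated_hosts_to_isolated_images = true
# keeps that image on bm-host-01/bm-host-02 only, and keeps all other
# images off those two hosts.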
5298
5299#
5300# Image property namespace for use in the host aggregate.
5301#
5302# Images and hosts can be configured so that certain images can only
5303# be scheduled
5304# to hosts in a particular aggregate. This is done with metadata
5305# values set on
5306# the host aggregate that are identified by beginning with the value
5307# of this
5308# option. If the host is part of an aggregate with such a metadata
5309# key, the image
5310# in the request spec must have the value of that metadata in its
5311# properties in
5312# order for the scheduler to consider the host as acceptable.
5313#
5314# This option is only used by the FilterScheduler and its subclasses;
5315# if you use
5316# a different scheduler, this option has no effect. Also note that
5317# this setting
5318# only affects scheduling if the
5319# 'aggregate_image_properties_isolation' filter is
5320# enabled.
5321#
5322# Possible values:
5323#
5324# * A string, where the string corresponds to an image property
5325# namespace
5326#
5327# Related options:
5328#
5329# * aggregate_image_properties_isolation_separator
5330# (string value)
5331#aggregate_image_properties_isolation_namespace = <None>
5332
5333#
5334# Separator character(s) for image property namespace and name.
5335#
5336# When using the aggregate_image_properties_isolation filter, the
5337# relevant
5338# metadata keys are prefixed with the namespace defined in the
5339# aggregate_image_properties_isolation_namespace configuration option
5340# plus a
5341# separator. This option defines the separator to be used.
5342#
5343# This option is only used by the FilterScheduler and its subclasses;
5344# if you use
5345# a different scheduler, this option has no effect. Also note that
5346# this setting
5347# only affects scheduling if the
5348# 'aggregate_image_properties_isolation' filter
5349# is enabled.
5350#
5351# Possible values:
5352#
5353# * A string, where the string corresponds to an image property
5354# namespace
5355# separator character
5356#
5357# Related options:
5358#
5359# * aggregate_image_properties_isolation_namespace
5360# (string value)
5361#aggregate_image_properties_isolation_separator = .
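#
# Worked example (values are hypothetical): with
#   aggregate_image_properties_isolation_namespace = nova
#   aggregate_image_properties_isolation_separator = .
# the filter only inspects aggregate metadata keys prefixed with 'nova.',
# e.g. 'nova.os_distro=ubuntu'; a host in such an aggregate is then only
# considered acceptable for requests whose image carries the matching
# 'os_distro=ubuntu' property.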
5362
5363
5364[glance]
5365# Configuration options for the Image service
5366
5367#
5368# From nova.conf
5369#
5370
5371#
5372# List of glance api servers endpoints available to nova.
5373#
5374# https is used for ssl-based glance api servers.
5375#
5376# NOTE: The preferred mechanism for endpoint discovery is via
5377# keystoneauth1
5378# loading options. Only use api_servers if you need multiple endpoints
5379# and are
5380# unable to use a load balancer for some reason.
5381#
5382# Possible values:
5383#
5384# * A list of any fully qualified url of the form
5385# "scheme://hostname:port[/path]"
5386# (i.e. "http://10.0.1.0:9292" or "https://my.glance.server/image").
5387# (list value)
5388#api_servers = <None>
5389{%- if compute.image is defined %}
5390api_servers = {{ compute.image.get('protocol', 'http') }}://{{ compute.image.host }}:{{ compute.image.get('port', 9292) }}
5391{% endif %}
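#
# For illustration, a hypothetical pillar snippet that would render the
# api_servers line above (assuming the usual nova:compute pillar layout
# of this formula; host and port values are examples only):
#
#   nova:
#     compute:
#       image:
#         protocol: http
#         host: 192.168.0.10
#         port: 9292
#
# which renders: api_servers = http://192.168.0.10:9292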
5392
5393#
5394# Enable glance operation retries.
5395#
5396# Specifies the number of retries when uploading / downloading
5397# an image to / from glance. 0 means no retries.
5398# (integer value)
5399# Minimum value: 0
5400#num_retries = 0
5401
5402# DEPRECATED:
5403# List of url schemes that can be directly accessed.
5404#
5405# This option specifies a list of url schemes that can be downloaded
5406# directly via the direct_url. This direct_url can be fetched from the
5407# image metadata and used by nova to get the image more efficiently.
5409# invoking a copy when it has access to the same file system as
5410# glance.
5411#
5412# Possible values:
5413#
5414# * [file], Empty list (default)
5415# (list value)
5416# This option is deprecated for removal since 17.0.0.
5417# Its value may be silently ignored in the future.
5418# Reason:
5419# This was originally added for the 'nova.image.download.file'
5420# FileTransfer
5421# extension which was removed in the 16.0.0 Pike release. The
5422# 'nova.image.download.modules' extension point is not maintained
5423# and there is no indication of its use in production clouds.
5424#allowed_direct_url_schemes =
5425
5426#
5427# Enable image signature verification.
5428#
5429# nova uses the image signature metadata from glance and verifies the
5430# signature
5431# of a signed image while downloading that image. If the image
5432# signature cannot
5433# be verified or if the image signature metadata is either incomplete
5434# or
5435# unavailable, then nova will not boot the image and instead will
5436# place the
5437# instance into an error state. This provides end users with stronger
5438# assurances
5439# of the integrity of the image data they are using to create servers.
5440#
5441# Related options:
5442#
5443# * The options in the `key_manager` group, as the key_manager is used
5444# for the signature validation.
5445# * Both enable_certificate_validation and
5446# default_trusted_certificate_ids
5447# below depend on this option being enabled.
5448# (boolean value)
5449{%- if compute.get('image', {}).verify_glance_signatures is defined %}
5450verify_glance_signatures={{ compute.image.verify_glance_signatures }}
5451{%- elif compute.get('barbican', {}).get('enabled', False) %}
5452verify_glance_signatures=true
5453{%- else %}
5454#verify_glance_signatures=false
5455{%- endif %}
5456
5457# DEPRECATED:
5458# Enable certificate validation for image signature verification.
5459#
5460# During image signature verification nova will first verify the
5461# validity of the
5462# image's signing certificate using the set of trusted certificates
5463# associated
5464# with the instance. If certificate validation fails, signature
5465# verification
5466# will not be performed and the image will be placed into an error
5467# state. This
5468# provides end users with stronger assurances that the image data is
5469# unmodified
5470# and trustworthy. If left disabled, image signature verification can
5471# still
5472# occur but the end user will not have any assurance that the signing
5473# certificate used to generate the image signature is still
5474# trustworthy.
5475#
5476# Related options:
5477#
5478# * This option only takes effect if verify_glance_signatures is
5479# enabled.
5480# * The value of default_trusted_certificate_ids may be used when this
5481# option
5482# is enabled.
5483# (boolean value)
5484# This option is deprecated for removal since 16.0.0.
5485# Its value may be silently ignored in the future.
5486# Reason:
5487# This option is intended to ease the transition for deployments
5488# leveraging
5489# image signature verification. The intended state long-term is for
5490# signature
5491# verification and certificate validation to always happen together.
5492#enable_certificate_validation = false
5493
5494#
5495# List of certificate IDs for certificates that should be trusted.
5496#
5497# May be used as a default list of trusted certificate IDs for
5498# certificate
5499# validation. The value of this option will be ignored if the user
5500# provides a
5501# list of trusted certificate IDs with an instance API request. The
5502# value of
5503# this option will be persisted with the instance data if signature
5504# verification
5505# and certificate validation are enabled and if the user did not
5506# provide an
5507# alternative list. If left empty when certificate validation is
5508# enabled the
5509# user must provide a list of trusted certificate IDs otherwise
5510# certificate
5511# validation will fail.
5512#
5513# Related options:
5514#
5515# * The value of this option may be used if both
5516# verify_glance_signatures and
5517# enable_certificate_validation are enabled.
5518# (list value)
5519#default_trusted_certificate_ids =
5520
5521# Enable or disable debug logging with glanceclient. (boolean value)
5522#debug = false
5523
5524# PEM encoded Certificate Authority to use when verifying HTTPs
5525# connections. (string value)
5526#cafile = <None>
5527
5528# PEM encoded client certificate cert file (string value)
5529#certfile = <None>
5530
5531# PEM encoded client certificate key file (string value)
5532#keyfile = <None>
5533
5534# Verify HTTPS connections. (boolean value)
5535#insecure = false
5536
5537# Timeout value for http requests (integer value)
5538#timeout = <None>
5539
5540# The default service_type for endpoint URL discovery. (string value)
5541#service_type = image
5542
5543# The default service_name for endpoint URL discovery. (string value)
5544#service_name = <None>
5545
5546# List of interfaces, in order of preference, for endpoint URL. (list
5547# value)
5548#valid_interfaces = internal,public
5549
5550# The default region_name for endpoint URL discovery. (string value)
5551#region_name = <None>
5552
5553# Always use this endpoint URL for requests for this client. NOTE: The
5554# unversioned endpoint should be specified here; to request a
5555# particular API version, use the `version`, `min-version`, and/or
5556# `max-version` options. (string value)
5557#endpoint_override = <None>
5558
5559
5560[guestfs]
5561#
5562# libguestfs is a set of tools for accessing and modifying virtual
5563# machine (VM) disk images. You can use this for viewing and editing
5564# files inside guests, scripting changes to VMs, monitoring disk
5565# used/free statistics, creating guests, P2V, V2V, performing backups,
5566# cloning VMs, building VMs, formatting disks and resizing disks.
5567
5568#
5569# From nova.conf
5570#
5571
5572#
5573# Enables/disables guestfs logging.
5574#
5575# This configures guestfs to emit debug messages and push them to the
5576# OpenStack logging system. When set to True, it traces libguestfs API
5577# calls and enables verbose debug messages. In order to use this
5578# feature, the "libguestfs" package must be installed.
5579#
5580# Related options:
5581# Since libguestfs accesses and modifies VMs managed by libvirt, the
5582# options below should be set to give access to those VMs.
5584# * libvirt.inject_key
5585# * libvirt.inject_partition
5586# * libvirt.inject_password
5587# (boolean value)
5588#debug = false
5589
5590
5591[hyperv]
5592#
5593# The hyperv feature allows you to configure the Hyper-V hypervisor
5594# driver to be used within an OpenStack deployment.
5595
5596#
5597# From nova.conf
5598#
5599
5600#
5601# Dynamic memory ratio
5602#
5603# Enables dynamic memory allocation (ballooning) when set to a value
5604# greater than 1. The value expresses the ratio between the total RAM
5605# assigned to an instance and its startup RAM amount. For example a
5606# ratio of 2.0 for an instance with 1024MB of RAM implies 512MB of
5607# RAM allocated at startup.
5608#
5609# Possible values:
5610#
5611# * 1.0: Disables dynamic memory allocation (Default).
5612# * Float values greater than 1.0: Enables allocation of total implied
5613# RAM divided by this value for startup.
5614# (floating point value)
5615#dynamic_memory_ratio = 1.0
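#
# Worked example (sketch only): startup RAM = instance RAM / ratio, so a
# ratio of 4.0 for an instance with 2048MB of RAM implies 512MB allocated
# at startup; Hyper-V then grows the allocation on demand up to the full
# 2048MB.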
5616
5617#
5618# Enable instance metrics collection
5619#
5620# Enables metrics collections for an instance by using Hyper-V's
5621# metric APIs. Collected data can be retrieved by other apps and
5622# services, e.g.: Ceilometer.
5623# (boolean value)
5624#enable_instance_metrics_collection = false
5625
5626#
5627# Instances path share
5628#
5629# The name of a Windows share mapped to the "instances_path" dir
5630# and used by the resize feature to copy files to the target host.
5631# If left blank, an administrative share (hidden network share) will
5632# be used, looking for the same "instances_path" used locally.
5633#
5634# Possible values:
5635#
5636# * "": An administrative share will be used (Default).
5637# * Name of a Windows share.
5638#
5639# Related options:
5640#
5641# * "instances_path": The directory which will be used if this option
5642# here is left blank.
5643# (string value)
5644#instances_path_share =
5645
5646#
5647# Limit CPU features
5648#
5649# This flag is needed to support live migration to hosts with
5650# different CPU features and checked during instance creation
5651# in order to limit the CPU features used by the instance.
5652# (boolean value)
5653#limit_cpu_features = false
5654
5655#
5656# Mounted disk query retry count
5657#
5658# The number of times to retry checking for a mounted disk.
5659# The query runs until the device can be found or the retry
5660# count is reached.
5661#
5662# Possible values:
5663#
5664# * Positive integer values. Values greater than 1 are recommended
5665# (Default: 10).
5666#
5667# Related options:
5668#
5669# * Time interval between disk mount retries is declared with
5670# "mounted_disk_query_retry_interval" option.
5671# (integer value)
5672# Minimum value: 0
5673#mounted_disk_query_retry_count = 10
5674
5675#
5676# Mounted disk query retry interval
5677#
5678# Interval between checks for a mounted disk, in seconds.
5679#
5680# Possible values:
5681#
5682# * Time in seconds (Default: 5).
5683#
5684# Related options:
5685#
5686# * This option is meaningful when the mounted_disk_query_retry_count
5687# is greater than 1.
5688# * The retry loop runs with mounted_disk_query_retry_count and
5689# mounted_disk_query_retry_interval configuration options.
5690# (integer value)
5691# Minimum value: 0
5692#mounted_disk_query_retry_interval = 5
5693
5694#
5695# Power state check timeframe
5696#
5697# The timeframe to be checked for instance power state changes.
5698# This option is used to fetch the state of the instance from Hyper-V
5699# through the WMI interface, within the specified timeframe.
5700#
5701# Possible values:
5702#
5703# * Timeframe in seconds (Default: 60).
5704# (integer value)
5705# Minimum value: 0
5706#power_state_check_timeframe = 60
5707
5708#
5709# Power state event polling interval
5710#
5711# Instance power state change event polling frequency. Sets the
5712# listener interval for power state events to the given value.
5713# This option enhances the internal lifecycle notifications of
5714# instances that reboot themselves. It is unlikely that an operator
5715# has to change this value.
5716#
5717# Possible values:
5718#
5719# * Time in seconds (Default: 2).
5720# (integer value)
5721# Minimum value: 0
5722#power_state_event_polling_interval = 2
5723
5724#
5725# qemu-img command
5726#
5727# qemu-img is required for some of the image related operations
5728# like converting between different image types. You can get it
5729# from here: (http://qemu.weilnetz.de/) or you can install the
5730# Cloudbase OpenStack Hyper-V Compute Driver
5731# (https://cloudbase.it/openstack-hyperv-driver/) which automatically
5732# sets the proper path for this config option. You can either give the
5733# full path of qemu-img.exe or set its path in the PATH environment
5734# variable and leave this option at its default value.
5735#
5736# Possible values:
5737#
5738# * Name of the qemu-img executable, in case it is in the same
5739# directory as the nova-compute service or its path is in the
5740# PATH environment variable (Default).
5741# * Path of qemu-img command (DRIVELETTER:\PATH\TO\QEMU-IMG\COMMAND).
5742#
5743# Related options:
5744#
5745# * If the config_drive_cdrom option is False, qemu-img will be used
5746# to
5747# convert the ISO to a VHD, otherwise the configuration drive will
5748# remain an ISO. To use configuration drive with Hyper-V, you must
5749# set the mkisofs_cmd value to the full path to an mkisofs.exe
5750# installation.
5751# (string value)
5752#qemu_img_cmd = qemu-img.exe
5753
5754#
5755# External virtual switch name
5756#
5757# The Hyper-V Virtual Switch is a software-based layer-2 Ethernet
5758# network switch that is available with the installation of the
5759# Hyper-V server role. The switch includes programmatically managed
5760# and extensible capabilities to connect virtual machines to both
5761# virtual networks and the physical network. In addition, Hyper-V
5762# Virtual Switch provides policy enforcement for security, isolation,
5763# and service levels. The vSwitch represented by this config option
5764# must be an external one (not internal or private).
5765#
5766# Possible values:
5767#
5768# * If not provided, the first of a list of available vswitches
5769# is used. This list is queried using WQL.
5770# * Virtual switch name.
5771# (string value)
5772#vswitch_name = <None>
5773
5774#
5775# Wait soft reboot seconds
5776#
5777# Number of seconds to wait for instance to shut down after soft
5778# reboot request is made. We fall back to hard reboot if instance
5779# does not shut down within this window.
5780#
5781# Possible values:
5782#
5783# * Time in seconds (Default: 60).
5784# (integer value)
5785# Minimum value: 0
5786#wait_soft_reboot_seconds = 60
5787
5788#
5789# Configuration drive cdrom
5790#
5791# OpenStack can be configured to write instance metadata to
5792# a configuration drive, which is then attached to the
5793# instance before it boots. The configuration drive can be
5794# attached as a disk drive (default) or as a CD drive.
5795#
5796# Possible values:
5797#
5798# * True: Attach the configuration drive image as a CD drive.
5799# * False: Attach the configuration drive image as a disk drive
5800# (Default).
5801#
5802# Related options:
5803#
5804# * This option is meaningful with the force_config_drive option set to
5805# 'True' or when the REST API call to create an instance includes the
5806# '--config-drive=True' flag.
5808# * config_drive_format option must be set to 'iso9660' in order to
5809# use
5810# CD drive as the configuration drive image.
5811# * To use configuration drive with Hyper-V, you must set the
5812# mkisofs_cmd value to the full path to an mkisofs.exe installation.
5813# Additionally, you must set the qemu_img_cmd value to the full path
5814# to a qemu-img command installation.
5815# * You can configure the Compute service to always create a
5816# configuration
5817# drive by setting the force_config_drive option to 'True'.
5818# (boolean value)
5819#config_drive_cdrom = false
5820config_drive_cdrom = {{ compute.get('config_drive', {}).get('cdrom', False)|lower }}
5821
5822#
5823# Configuration drive inject password
5824#
5825# Enables setting the admin password in the configuration drive image.
5826#
5827# Related options:
5828#
5829# * This option is meaningful when used with other options that enable
5830# configuration drive usage with Hyper-V, such as
5831# force_config_drive.
5832# * Currently, the only accepted config_drive_format is 'iso9660'.
5833# (boolean value)
5834#config_drive_inject_password = false
5835config_drive_inject_password = {{ compute.get('config_drive', {}).get('inject_password', False)|lower }}
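#
# For illustration, a hypothetical pillar snippet driving the two rendered
# config_drive options above (assuming the usual nova:compute pillar
# layout of this formula):
#
#   nova:
#     compute:
#       config_drive:
#         cdrom: true
#         inject_password: false
#
# which renders config_drive_cdrom = true and
# config_drive_inject_password = false.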
5836
5837#
5838# Volume attach retry count
5839#
5840# The number of times to retry attaching a volume. Volume attachment
5841# is retried until success or the given retry count is reached.
5842#
5843# Possible values:
5844#
5845# * Positive integer values (Default: 10).
5846#
5847# Related options:
5848#
5849# * Time interval between attachment attempts is declared with
5850# volume_attach_retry_interval option.
5851# (integer value)
5852# Minimum value: 0
5853#volume_attach_retry_count = 10
5854
5855#
5856# Volume attach retry interval
5857#
5858# Interval between volume attachment attempts, in seconds.
5859#
5860# Possible values:
5861#
5862# * Time in seconds (Default: 5).
5863#
5864# Related options:
5865#
5866# * This option is meaningful when volume_attach_retry_count
5867# is greater than 1.
5868# * The retry loop runs with volume_attach_retry_count and
5869# volume_attach_retry_interval configuration options.
5870# (integer value)
5871# Minimum value: 0
5872#volume_attach_retry_interval = 5
5873
5874#
5875# Enable RemoteFX feature
5876#
5877# This requires at least one DirectX 11 capable graphics adapter for
5878# Windows / Hyper-V Server 2012 R2 or newer, and the RDS-Virtualization
5879# feature has to be enabled.
5880#
5881# Instances with RemoteFX can be requested with the following flavor
5882# extra specs:
5883#
5884# **os:resolution**. Guest VM screen resolution size. Acceptable
5885# values::
5886#
5887# 1024x768, 1280x1024, 1600x1200, 1920x1200, 2560x1600, 3840x2160
5888#
5889# ``3840x2160`` is only available on Windows / Hyper-V Server 2016.
5890#
5891# **os:monitors**. Guest VM number of monitors. Acceptable values::
5892#
5893# [1, 4] - Windows / Hyper-V Server 2012 R2
5894# [1, 8] - Windows / Hyper-V Server 2016
5895#
5896# **os:vram**. Guest VM VRAM amount. Only available on
5897# Windows / Hyper-V Server 2016. Acceptable values::
5898#
5899# 64, 128, 256, 512, 1024
5900# (boolean value)
5901#enable_remotefx = false
5902
5903#
5904# Use multipath connections when attaching iSCSI or FC disks.
5905#
5906# This requires the Multipath IO Windows feature to be enabled. MPIO
5907# must be
5908# configured to claim such devices.
5909# (boolean value)
5910#use_multipath_io = false
5911
5912#
5913# List of iSCSI initiators that will be used for establishing iSCSI
5914# sessions.
5915#
5916# If none are specified, the Microsoft iSCSI initiator service will
5917# choose the
5918# initiator.
5919# (list value)
5920#iscsi_initiator_list =
5921
5922{% if compute.ironic is defined -%}
5923[ironic]
5924#
5925# Configuration options for Ironic driver (Bare Metal).
5926# If using the Ironic driver, the following options must be set:
5927# * auth_type
5928# * auth_url
5929# * project_name
5930# * username
5931# * password
5932# * project_domain_id or project_domain_name
5933# * user_domain_id or user_domain_name
5934
5935#
5936# From nova.conf
5937#
5938
5939# DEPRECATED: URL override for the Ironic API endpoint. (uri value)
5940# This option is deprecated for removal.
5941# Its value may be silently ignored in the future.
5942# Reason: Endpoint lookup uses the service catalog via common
5943# keystoneauth1 Adapter configuration options. In the current release,
5944# api_endpoint will override this behavior, but will be ignored and/or
5945# removed in a future release. To achieve the same result, use the
5946# endpoint_override option instead.
5947#api_endpoint = http://ironic.example.org:6385/
5948api_endpoint={{ compute.ironic.get('protocol', 'http') }}://{{ compute.ironic.host }}:{{ compute.ironic.port }}
5949
5950#
5951# The number of times to retry when a request conflicts.
5952# If set to 0, only try once, no retries.
5953#
5954# Related options:
5955#
5956# * api_retry_interval
5957# (integer value)
5958# Minimum value: 0
5959#api_max_retries = 60
5960
5961#
5962# The number of seconds to wait before retrying the request.
5963#
5964# Related options:
5965#
5966# * api_max_retries
5967# (integer value)
5968# Minimum value: 0
5969#api_retry_interval = 2
5970
5971# Timeout (seconds) to wait for node serial console state changed. Set
5972# to 0 to disable timeout. (integer value)
5973# Minimum value: 0
5974#serial_console_state_timeout = 10
5975
5976# PEM encoded Certificate Authority to use when verifying HTTPs
5977# connections. (string value)
5978#cafile = <None>
5979{%- if compute.ironic.get('protocol', 'http') == 'https' %}
5980cafile={{ compute.identity.get('cacert_file', compute.cacert_file) }}
5981{%- endif %}
5982
5983# PEM encoded client certificate cert file (string value)
5984#certfile = <None>
5985
5986# PEM encoded client certificate key file (string value)
5987#keyfile = <None>
5988
5989# Verify HTTPS connections. (boolean value)
5990#insecure = false
5991
5992# Timeout value for http requests (integer value)
5993#timeout = <None>
5994
5995# Authentication type to load (string value)
5996# Deprecated group/name - [ironic]/auth_plugin
5997#auth_type = <None>
5998auth_type={{ compute.ironic.auth_type }}
5999
6000# Config Section from which to load plugin specific options (string
6001# value)
6002#auth_section = <None>
6003
6004# Authentication URL (string value)
6005#auth_url = <None>
6006auth_url={{ compute.identity.get('protocol', 'http') }}://{{ compute.identity.host }}:{{ compute.identity.port }}/v3
6007
6008# Scope for system operations (string value)
6009#system_scope = <None>
6010
6011# Domain ID to scope to (string value)
6012#domain_id = <None>
6013
6014# Domain name to scope to (string value)
6015#domain_name = <None>
6016
6017# Project ID to scope to (string value)
6018#project_id = <None>
6019
6020# Project name to scope to (string value)
6021#project_name = <None>
6022project_name={{ compute.identity.tenant }}
6023
6024# Domain ID containing project (string value)
6025#project_domain_id = <None>
6026
6027# Domain name containing project (string value)
6028#project_domain_name = <None>
6029project_domain_name={{ compute.ironic.project_domain_name }}
6030
6031# Trust ID (string value)
6032#trust_id = <None>
6033
6034# User ID (string value)
6035#user_id = <None>
6036
6037# Username (string value)
6038# Deprecated group/name - [ironic]/user_name
6039#username = <None>
6040username={{ compute.ironic.user }}
6041
6042# User's domain id (string value)
6043#user_domain_id = <None>
6044
6045# User's domain name (string value)
6046#user_domain_name = <None>
6047user_domain_name={{ compute.ironic.user_domain_name }}
6048
6049
6050# User's password (string value)
6051#password = <None>
6052password={{ compute.ironic.password }}
6053# The default service_type for endpoint URL discovery. (string value)
6054#service_type = baremetal
6055
6056# The default service_name for endpoint URL discovery. (string value)
6057#service_name = <None>
6058
6059# List of interfaces, in order of preference, for endpoint URL. (list
6060# value)
6061#valid_interfaces = internal,public
6062
6063# The default region_name for endpoint URL discovery. (string value)
6064#region_name = <None>
6065
6066# Always use this endpoint URL for requests for this client. NOTE: The
6067# unversioned endpoint should be specified here; to request a
6068# particular API version, use the `version`, `min-version`, and/or
6069# `max-version` options. (string value)
6070# Deprecated group/name - [ironic]/api_endpoint
6071#endpoint_override = <None>
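#
# For illustration, a hypothetical pillar snippet covering the ironic keys
# rendered in this section (assuming the usual nova:compute pillar layout
# of this formula; endpoint and credential values are examples only).
# auth_url and project_name are taken from the compute.identity pillar
# instead:
#
#   nova:
#     compute:
#       ironic:
#         protocol: http
#         host: 192.168.0.20
#         port: 6385
#         auth_type: password
#         user: ironic
#         password: secret
#         project_domain_name: Default
#         user_domain_name: Default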
6072{%- endif %}
6073
6074
6075[key_manager]
6076
6077#
6078# From nova.conf
6079#
6080
6081#
6082# Fixed key returned by key manager, specified in hex.
6083#
6084# Possible values:
6085#
6086# * Empty string or a key in hex value
6087# (string value)
6088#fixed_key = <None>
6089{%- if compute.get('barbican', {}).get('enabled', False) %}
6090api_class=castellan.key_manager.barbican_key_manager.BarbicanKeyManager
6091{%- endif %}
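#
# For illustration: a hypothetical pillar setting such as
#
#   nova:
#     compute:
#       barbican:
#         enabled: true
#
# renders the Castellan Barbican api_class above and also enables
# verify_glance_signatures in the [glance] section of this template.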
6092
6093# Specify the key manager implementation. Options are "barbican" and
6094# "vault". Default is "barbican". Will support the values earlier
6095# set using [key_manager]/api_class for some time. (string value)
6096# Deprecated group/name - [key_manager]/api_class
6097#backend = barbican
6098
6099# The type of authentication credential to create. Possible values are
6100# 'token', 'password', 'keystone_token', and 'keystone_password'.
6101# Required if no context is passed to the credential factory. (string
6102# value)
6103#auth_type = <None>
6104
6105# Token for authentication. Required for 'token' and 'keystone_token'
6106# auth_type if no context is passed to the credential factory. (string
6107# value)
6108#token = <None>
6109
6110# Username for authentication. Required for 'password' auth_type.
6111# Optional for the 'keystone_password' auth_type. (string value)
6112#username = <None>
6113
6114# Password for authentication. Required for 'password' and
6115# 'keystone_password' auth_type. (string value)
6116#password = <None>
6117
6118# Use this endpoint to connect to Keystone. (string value)
6119#auth_url = <None>
6120
6121# User ID for authentication. Optional for 'keystone_token' and
6122# 'keystone_password' auth_type. (string value)
6123#user_id = <None>
6124
6125# User's domain ID for authentication. Optional for 'keystone_token'
6126# and 'keystone_password' auth_type. (string value)
6127#user_domain_id = <None>
6128
6129# User's domain name for authentication. Optional for 'keystone_token'
6130# and 'keystone_password' auth_type. (string value)
6131#user_domain_name = <None>
6132
6133# Trust ID for trust scoping. Optional for 'keystone_token' and
6134# 'keystone_password' auth_type. (string value)
6135#trust_id = <None>
6136
6137# Domain ID for domain scoping. Optional for 'keystone_token' and
6138# 'keystone_password' auth_type. (string value)
6139#domain_id = <None>
6140
6141# Domain name for domain scoping. Optional for 'keystone_token' and
6142# 'keystone_password' auth_type. (string value)
6143#domain_name = <None>
6144
6145# Project ID for project scoping. Optional for 'keystone_token' and
6146# 'keystone_password' auth_type. (string value)
6147#project_id = <None>
6148
6149# Project name for project scoping. Optional for 'keystone_token' and
6150# 'keystone_password' auth_type. (string value)
6151#project_name = <None>
6152
6153# Project's domain ID for project. Optional for 'keystone_token' and
6154# 'keystone_password' auth_type. (string value)
6155#project_domain_id = <None>
6156
6157# Project's domain name for project. Optional for 'keystone_token' and
6158# 'keystone_password' auth_type. (string value)
6159#project_domain_name = <None>
6160
6161# Allow fetching a new token if the current one is going to expire.
6162# Optional for 'keystone_token' and 'keystone_password' auth_type.
6163# (boolean value)
6164#reauthenticate = true
6165
6166
6167[keystone]
6168# Configuration options for the identity service
6169
6170#
6171# From nova.conf
6172#
6173
6174# PEM encoded Certificate Authority to use when verifying HTTPs
6175# connections. (string value)
6176#cafile = <None>
6177
6178# PEM encoded client certificate cert file (string value)
6179#certfile = <None>
6180
6181# PEM encoded client certificate key file (string value)
6182#keyfile = <None>
6183
6184# Verify HTTPS connections. (boolean value)
6185#insecure = false
6186
6187# Timeout value for http requests (integer value)
6188#timeout = <None>
6189
6190# The default service_type for endpoint URL discovery. (string value)
6191#service_type = identity
6192
6193# The default service_name for endpoint URL discovery. (string value)
6194#service_name = <None>
6195
6196# List of interfaces, in order of preference, for endpoint URL. (list
6197# value)
6198#valid_interfaces = internal,public
6199
6200# The default region_name for endpoint URL discovery. (string value)
6201#region_name = <None>
6202
6203# Always use this endpoint URL for requests for this client. NOTE: The
6204# unversioned endpoint should be specified here; to request a
6205# particular API version, use the `version`, `min-version`, and/or
6206# `max-version` options. (string value)
6207#endpoint_override = <None>
6208
6209
6210[libvirt]
6211#
6212# Libvirt options allows cloud administrator to configure related
6213# libvirt hypervisor driver to be used within an OpenStack deployment.
6214#
6215# Almost all of the libvirt config options are influence by
6216# ``virt_type`` config
6217# which describes the virtualization type (or so called domain type)
6218# libvirt
6219# should use for specific features such as live migration, snapshot.
6220
6221#
6222# From nova.conf
6223#
6224{%- if compute.libvirt.virt_type is defined %}
6225virt_type = {{ compute.libvirt.virt_type }}
6226{%- else %}
6227virt_type = kvm
6228{%- endif%}
6229
6230inject_partition={{ compute.libvirt.inject_partition }}
6231{%- if compute.libvirt.get('inject_partition', '-2')|string == '-2' %}
6232inject_password=False
6233{%- else %}
6234inject_password={{ compute.libvirt.inject_password }}
6235{%- endif %}
6236
6237disk_cachemodes="{{ compute.get('disk_cachemodes', 'network=writeback,block=none') }}"
6238block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_NON_SHARED_INC
6239live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST
6240inject_key=True
6241vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
6242
6243{%- if compute.get('ceph', {}).get('ephemeral', False) %}
6244images_type=rbd
6245images_rbd_pool={{ compute.ceph.rbd_pool }}
6246images_rbd_ceph_conf=/etc/ceph/ceph.conf
6247rbd_user={{ compute.ceph.rbd_user }}
6248rbd_secret_uuid={{ compute.ceph.secret_uuid }}
6249inject_password=false
6250inject_key=false
6251{%- elif compute.get('lvm', {}).get('ephemeral', False) %}
6252images_type=lvm
6253images_volume_group={{ compute.lvm.images_volume_group }}
6254{%- if compute.lvm.volume_clear is defined %}
6255volume_clear={{ compute.lvm.volume_clear }}
6256{%- endif %}
6257{%- if compute.lvm.volume_clear_size is defined %}
6258volume_clear_size={{ compute.lvm.volume_clear_size }}
6259{%- endif %}
6260{%- endif %}
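#
# For illustration, a hypothetical pillar snippet enabling RBD-backed
# ephemeral storage (pool, user and uuid values are examples only):
#
#   nova:
#     compute:
#       ceph:
#         ephemeral: true
#         rbd_pool: vms
#         rbd_user: nova
#         secret_uuid: 00000000-0000-0000-0000-000000000000
#
# which renders images_type=rbd plus the rbd_* options above and disables
# password and key injection.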
6261
6262{%- if compute.libvirt.tls.get('enabled', False) %}
6263live_migration_scheme="tls"
6264{%- else %}
6265{%- if compute.get('libvirt', {}).uri is defined %}
6266connection_uri={{ compute.libvirt.uri }}
6267{%- endif %}
6268{%- endif %}
6269
6270#
6271# The ID of the image to boot from to rescue data from a corrupted
6272# instance.
6273#
6274# If the rescue REST API operation doesn't provide an ID of an image
6275# to
6276# use, the image which is referenced by this ID is used. If this
6277# option is not set, the image from the instance is used.
6278#
6279# Possible values:
6280#
6281# * An ID of an image or nothing. If it points to an *Amazon Machine
6282# Image* (AMI), consider setting the config options
6283# ``rescue_kernel_id``
6284# and ``rescue_ramdisk_id`` too. If nothing is set, the image of the
6285# instance
6286# is used.
6287#
6288# Related options:
6289#
6290# * ``rescue_kernel_id``: If the chosen rescue image allows the
6291# separate
6292# definition of its kernel disk, the value of this option is used,
6293# if specified. This is the case when *Amazon*'s AMI/AKI/ARI image
6294# format is used for the rescue image.
6295# * ``rescue_ramdisk_id``: If the chosen rescue image allows the
6296# separate
6297# definition of its RAM disk, the value of this option is used, if
6298# specified. This is the case when *Amazon*'s AMI/AKI/ARI image
6299# format is used for the rescue image.
6300# (string value)
6301#rescue_image_id = <None>
6302
6303#
6304# The ID of the kernel (AKI) image to use with the rescue image.
6305#
6306# If the chosen rescue image allows the separate definition of its
6307# kernel
6308# disk, the value of this option is used, if specified. This is the
6309# case
6310# when *Amazon*'s AMI/AKI/ARI image format is used for the rescue
6311# image.
6312#
6313# Possible values:
6314#
6315# * An ID of a kernel image or nothing. If nothing is specified, the
6316# kernel
6317# disk from the instance is used if it was launched with one.
6318#
6319# Related options:
6320#
6321# * ``rescue_image_id``: If that option points to an image in
6322# *Amazon*'s
6323# AMI/AKI/ARI image format, it's useful to use ``rescue_kernel_id``
6324# too.
6325# (string value)
6326#rescue_kernel_id = <None>
6327
6328#
6329# The ID of the RAM disk (ARI) image to use with the rescue image.
6330#
6331# If the chosen rescue image allows the separate definition of its RAM
6332# disk, the value of this option is used, if specified. This is the
6333# case
6334# when *Amazon*'s AMI/AKI/ARI image format is used for the rescue
6335# image.
6336#
6337# Possible values:
6338#
6339# * An ID of a RAM disk image or nothing. If nothing is specified, the
6340# RAM
6341# disk from the instance is used if it was launched with one.
6342#
6343# Related options:
6344#
6345# * ``rescue_image_id``: If that option points to an image in
6346# *Amazon*'s
6347# AMI/AKI/ARI image format, it's useful to use ``rescue_ramdisk_id``
6348# too.
6349# (string value)
6350#rescue_ramdisk_id = <None>
6351
6352#
6353# Describes the virtualization type (or so called domain type) libvirt
6354# should
6355# use.
6356#
6357# The choice of this type must match the underlying virtualization
6358# strategy
6359# you have chosen for this host.
6360#
6361# Possible values:
6362#
6363# * See the predefined set of case-sensitive values.
6364#
6365# Related options:
6366#
6367# * ``connection_uri``: depends on this
6368# * ``disk_prefix``: depends on this
6369# * ``cpu_mode``: depends on this
6370# * ``cpu_model``: depends on this
6371# (string value)
6372# Possible values:
6373# kvm - <No description provided>
6374# lxc - <No description provided>
6375# qemu - <No description provided>
6376# uml - <No description provided>
6377# xen - <No description provided>
6378# parallels - <No description provided>
6379#virt_type = kvm
6380
6381#
6382# Overrides the default libvirt URI of the chosen virtualization type.
6383#
6384# If set, Nova will use this URI to connect to libvirt.
6385#
6386# Possible values:
6387#
6388# * A URI like ``qemu:///system`` or ``xen+ssh://oirase/`` for
6389# example.
6390# This is only necessary if the URI differs from the commonly known
6391# URIs for the chosen virtualization type.
6393#
6394# Related options:
6395#
6396# * ``virt_type``: Influences what is used as default value here.
6397# (string value)
6398#connection_uri =
6399
6400#
6401# Algorithm used to hash the injected password.
6402# Note that it must be supported by libc on the compute host
6403# _and_ by libc inside *any guest image* that will be booted by this
6404# compute
6405# host with requested password injection.
6406# In case the specified algorithm is not supported by libc on the
6407# compute host,
6408# a fallback to DES algorithm will be performed.
6409#
6410# Related options:
6411#
6412# * ``inject_password``
6413# * ``inject_partition``
6414# (string value)
6415# Possible values:
6416# SHA-512 - <No description provided>
6417# SHA-256 - <No description provided>
6418# MD5 - <No description provided>
6419#inject_password_algorithm = MD5
6420
6421#
6422# Allow the injection of an admin password for an instance only during
6423# the ``create`` and ``rebuild`` processes.
6425#
6426# There is no agent needed within the image to do this. If
6427# *libguestfs* is
6428# available on the host, it will be used. Otherwise *nbd* is used. The
6429# file
6430# system of the image will be mounted and the admin password, which is
6431# provided in the REST API call, will be injected as the password for
6432# the root user.
6433# If no
6434# root user is available, the instance won't be launched and an error
6435# is thrown.
6436# Be aware that the injection is *not* possible when the instance gets
6437# launched
6438# from a volume.
6439#
6440# Possible values:
6441#
6442# * True: Allows the injection.
6443# * False (default): Disallows the injection. Any admin password
6444# provided via the REST API will be silently ignored.
6446#
6447# Related options:
6448#
6449# * ``inject_partition``: That option will decide about the discovery
6450# and usage
6451# of the file system. It can also disable the injection entirely.
6452# (boolean value)
6453#inject_password = false
6454
6455#
6456# Allow the injection of an SSH key at boot time.
6457#
6458# There is no agent needed within the image to do this. If
6459# *libguestfs* is
6460# available on the host, it will be used. Otherwise *nbd* is used. The
6461# file
6462# system of the image will be mounted and the SSH key, which is
6463# provided
6464# in the REST API call, will be injected as the SSH key for the root user
6465# and
6466# appended to the ``authorized_keys`` of that user. The SELinux
6467# context will
6468# be set if necessary. Be aware that the injection is *not* possible
6469# when the
6470# instance gets launched from a volume.
6471#
6472# This config option will enable directly modifying the instance disk
6473# and does
6474# not affect what cloud-init may do using data from config_drive
6475# option or the
6476# metadata service.
6477#
6478# Related options:
6479#
6480# * ``inject_partition``: That option will decide about the discovery
6481# and usage
6482# of the file system. It can also disable the injection entirely.
6483# (boolean value)
6484#inject_key = false
6485
6486#
6487# Determines how the file system is chosen to inject data into it.
6489#
6490# *libguestfs* will be used as the first solution to inject data. If
6491# that's not available on the host, the image will be locally mounted
6492# on the host as a fallback solution. If libguestfs is not able to
6493# determine the root partition (because there is more or less than one
6494# root partition) or cannot mount the file system, it will result in an
6495# error and the instance won't boot.
6500#
6501# Possible values:
6502#
6503# * -2 => disable the injection of data.
6504# * -1 => find the root partition with the file system to mount with
6505# libguestfs
6506# * 0 => The image is not partitioned
6507# * >0 => The number of the partition to use for the injection
6508#
6509# Related options:
6510#
6511# * ``inject_key``: If this option allows the injection of a SSH key
6512# it depends
6513# on value greater or equal to -1 for ``inject_partition``.
6514# * ``inject_password``: If this option allows the injection of an
6515# admin password
6516# it depends on value greater or equal to -1 for
6517# ``inject_partition``.
6518# * ``guestfs`` You can enable the debug log level of libguestfs with
6519# this
6520# config option. A more verbose output will help in debugging
6521# issues.
6522# * ``virt_type``: If you use ``lxc`` as virt_type it will be treated
6523# as a
6524# single partition image
6525# (integer value)
6526# Minimum value: -2
6527#inject_partition = -2
6528
6529# DEPRECATED:
6530# Enable a mouse cursor within a graphical VNC or SPICE sessions.
6531#
6532# This will only be taken into account if the VM is fully virtualized
6533# and VNC
6534# and/or SPICE is enabled. If the node doesn't support a graphical
6535# framebuffer,
6536# then it is valid to set this to False.
6537#
6538# Related options:
6539# * ``[vnc]enabled``: If VNC is enabled, ``use_usb_tablet`` will have
6540# an effect.
6541# * ``[spice]enabled`` + ``[spice].agent_enabled``: If SPICE is
6542# enabled and the
6543# spice agent is disabled, the config value of ``use_usb_tablet``
6544# will have
6545# an effect.
6546# (boolean value)
6547# This option is deprecated for removal since 14.0.0.
6548# Its value may be silently ignored in the future.
6549# Reason: This option is being replaced by the 'pointer_model' option.
6550#use_usb_tablet = true
6551
6552#
6553# The IP address or hostname to be used as the target for live
6554# migration traffic.
6555#
6556# If this option is set to None, the hostname of the migration target
6557# compute
6558# node will be used.
6559#
6560# This option is useful in environments where the live-migration
6561# traffic can
6562# impact the network plane significantly. A separate network for live-
6563# migration
6564# traffic can then use this config option and avoid the impact on the
6565# management network.
6566#
6567# Possible values:
6568#
6569# * A valid IP address or hostname, else None.
6570#
6571# Related options:
6572#
6573# * ``live_migration_tunnelled``: The live_migration_inbound_addr
6574# value is
6575# ignored if tunneling is enabled.
6576# (string value)
6577#live_migration_inbound_addr = <None>
6578{%- if compute.libvirt.migration_inbound_addr is defined %}
6579live_migration_inbound_addr = {{ compute.libvirt.migration_inbound_addr }}
6580{%- endif %}
6581
6582# DEPRECATED:
6583# Live migration target URI to use.
6584#
6585# Override the default libvirt live migration target URI (which is
6586# dependent
6587# on virt_type). Any included "%s" is replaced with the migration
6588# target
6589# hostname.
6590#
6591# If this option is set to None (which is the default), Nova will
6592# automatically
6593# generate the `live_migration_uri` value based only on the 4 supported
6594# `virt_type` values in the following list:
6596#
6597# * 'kvm': 'qemu+tcp://%s/system'
6598# * 'qemu': 'qemu+tcp://%s/system'
6599# * 'xen': 'xenmigr://%s/system'
6600# * 'parallels': 'parallels+tcp://%s/system'
6601#
6602# Related options:
6603#
6604# * ``live_migration_inbound_addr``: If
6605# ``live_migration_inbound_addr`` value
6606# is not None and ``live_migration_tunnelled`` is False, the
6607# ip/hostname
6608# address of target compute node is used instead of
6609# ``live_migration_uri`` as
6610# the uri for live migration.
6611# * ``live_migration_scheme``: If ``live_migration_uri`` is not set,
6612# the scheme
6613# used for live migration is taken from ``live_migration_scheme``
6614# instead.
6615# (string value)
6616# This option is deprecated for removal since 15.0.0.
6617# Its value may be silently ignored in the future.
6618# Reason:
6619# live_migration_uri is deprecated for removal in favor of two other
6620# options that
6621# allow to change live migration scheme and target URI:
6622# ``live_migration_scheme``
6623# and ``live_migration_inbound_addr`` respectively.
6624#live_migration_uri = <None>
6625
6626#
6627# URI scheme used for live migration.
6628#
6629# Override the default libvirt live migration scheme (which is
6630# dependent on
6631# virt_type). If this option is set to None, nova will automatically
6632# choose a
6633# sensible default based on the hypervisor. It is not recommended that
6634# you change
6635# this unless you are very sure that the hypervisor supports a particular
6636# scheme.
6637#
6638# Related options:
6639#
6640# * ``virt_type``: This option is meaningful only when ``virt_type``
6641# is set to
6642# `kvm` or `qemu`.
6643# * ``live_migration_uri``: If ``live_migration_uri`` value is not
6644# None, the
6645# scheme used for live migration is taken from
6646# ``live_migration_uri`` instead.
6647# (string value)
6648#live_migration_scheme = <None>
6649
6650#
6651# Enable tunnelled migration.
6652#
6653# This option enables the tunnelled migration feature, where migration
6654# data is
6655# transported over the libvirtd connection. If enabled, we use the
6656# VIR_MIGRATE_TUNNELLED migration flag, avoiding the need to configure
6657# the network to allow direct hypervisor to hypervisor communication.
6658# If False, use the native transport. If not set, Nova will choose a
6659# sensible default based on, for example the availability of native
6660# encryption support in the hypervisor. Enabling this option will
6661# definitely
6662# impact performance massively.
6663#
6664# Note that this option is NOT compatible with use of block migration.
6665#
6666# Related options:
6667#
6668# * ``live_migration_inbound_addr``: The live_migration_inbound_addr
6669# value is
6670# ignored if tunneling is enabled.
6671# (boolean value)
6672#live_migration_tunnelled = false
6673{%- if compute.libvirt.live_migration_tunnelled is defined %}
6674live_migration_tunnelled = {{ compute.libvirt.live_migration_tunnelled }}
6675{%- endif %}
6676
6677#
6678# Maximum bandwidth(in MiB/s) to be used during migration.
6679#
6680# If set to 0, the hypervisor will choose a suitable default. Some
6681# hypervisors
6682# do not support this feature and will return an error if bandwidth is
6683# not 0.
6684# Please refer to the libvirt documentation for further details.
6685# (integer value)
6686#live_migration_bandwidth = 0
6687
6688#
6689# Maximum permitted downtime, in milliseconds, for live migration
6690# switchover.
6691#
6692# Will be rounded up to a minimum of 100ms. You can increase this
6693# value
6694# if you want to allow live-migrations to complete faster, or avoid
6695# live-migration timeout errors by allowing the guest to be paused for
6696# longer during the live-migration switch over.
6697#
6698# Related options:
6699#
6700# * live_migration_completion_timeout
6701# (integer value)
6702# Minimum value: 100
6703#live_migration_downtime = 500
6704
6705#
6706# Number of incremental steps to reach max downtime value.
6707#
6708# Will be rounded up to a minimum of 3 steps.
6709# (integer value)
6710# Minimum value: 3
6711#live_migration_downtime_steps = 10
6712
6713#
6714# Time to wait, in seconds, between each step increase of the
6715# migration
6716# downtime.
6717#
6718# Minimum delay is 3 seconds. Value is per GiB of guest RAM + disk to
6719# be
6720# transferred, with lower bound of a minimum of 2 GiB per device.
6721# (integer value)
6722# Minimum value: 3
6723#live_migration_downtime_delay = 75
6724
6725#
6726# Time to wait, in seconds, for migration to successfully complete
6727# transferring
6728# data before aborting the operation.
6729#
6730# Value is per GiB of guest RAM + disk to be transferred, with lower
6731# bound of
6732# a minimum of 2 GiB. Should usually be larger than downtime delay *
6733# downtime
6734# steps. Set to 0 to disable timeouts.
6735#
6736# Related options:
6737#
6738# * live_migration_downtime
6739# * live_migration_downtime_steps
6740# * live_migration_downtime_delay
6741# (integer value)
6742# Note: This option can be changed without restarting.
6743#live_migration_completion_timeout = 800
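#
# Illustrative arithmetic (sketch only): the effective timeout scales with
# the amount of data to transfer. For a guest with 4 GiB of RAM and an
# 8 GiB local disk, the data size is roughly 12 GiB, so the default of 800
# allows about 12 * 800 = 9600 seconds before the migration is aborted.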
6744
6745# DEPRECATED:
6746# Time to wait, in seconds, for migration to make forward progress in
6747# transferring data before aborting the operation.
6748#
6749# Set to 0 to disable timeouts.
6750#
6751# This is deprecated, and now disabled by default because we have
6752# found serious
6753# bugs in this feature that caused false live-migration timeout
6754# failures. This
6755# feature will be removed or replaced in a future release.
6756# (integer value)
6757# Note: This option can be changed without restarting.
6758# This option is deprecated for removal.
6759# Its value may be silently ignored in the future.
6760# Reason: Serious bugs found in this feature.
6761#live_migration_progress_timeout = 0
6762
6763#
6764# This option allows nova to switch an on-going live migration to
6765# post-copy
6766# mode, i.e., switch the active VM to the one on the destination node
6767# before the
6768# migration is complete, therefore ensuring an upper bound on the
6769# memory that
6770# needs to be transferred. Post-copy requires libvirt>=1.3.3 and
6771# QEMU>=2.5.0.
6772#
6773# When permitted, post-copy mode will be automatically activated if a
6774# live-migration memory copy iteration does not make a percentage
6775# increase of at least 10% over the last iteration.
6777#
6778# The live-migration force complete API also uses post-copy when
6779# permitted. If
6780# post-copy mode is not available, force complete falls back to
6781# pausing the VM
6782# to ensure the live-migration operation will complete.
6783#
6784# When using post-copy mode, if the source and destination hosts lose
6785# network
6786# connectivity, the VM being live-migrated will need to be rebooted.
6787# For more
6788# details, please see the Administration guide.
6789#
6790# Related options:
6791#
6792# * live_migration_permit_auto_converge
6793# (boolean value)
6794#live_migration_permit_post_copy = false
6795
6796#
6797# This option allows nova to start live migration with auto converge
6798# on.
6799#
6800# Auto converge throttles down CPU if a progress of on-going live
6801# migration
6802# is slow. Auto converge will only be used if this flag is set to True
6803# and
6804# post copy is not permitted or post copy is unavailable due to the
6805# version
6806# of libvirt and QEMU in use.
6807#
6808# Related options:
6809#
6810# * live_migration_permit_post_copy
6811# (boolean value)
6812#live_migration_permit_auto_converge = false
6813{%- if compute.libvirt.live_migration_permit_auto_converge is defined %}
6814live_migration_permit_auto_converge={{ compute.libvirt.live_migration_permit_auto_converge|lower }}
6815{%- endif %}
6816
6817#
6818# Determine the snapshot image format when sending to the image
6819# service.
6820#
6821# If set, this decides what format is used when sending the snapshot
6822# to the
6823# image service.
6824# If not set, defaults to same type as source image.
6825#
6826# Possible values:
6827#
6828# * ``raw``: RAW disk format
6829# * ``qcow2``: KVM default disk format
6830# * ``vmdk``: VMWare default disk format
6831# * ``vdi``: VirtualBox default disk format
6832# * If not set, defaults to same type as source image.
6833# (string value)
6834# Possible values:
6835# raw - <No description provided>
6836# qcow2 - <No description provided>
6837# vmdk - <No description provided>
6838# vdi - <No description provided>
6839#snapshot_image_format = <None>
6840
6841#
6842# Override the default disk prefix for the devices attached to an
6843# instance.
6844#
6845# If set, this is used to identify a free disk device name for a bus.
6846#
6847# Possible values:
6848#
6849# * Any prefix which will result in a valid disk device name like
6850# 'sda' or 'hda'
6851# for example. This is only necessary if the device names differ from
6852# the
6853# commonly known device name prefixes for a virtualization type such
6854# as: sd,
6855# xvd, uvd, vd.
6856#
6857# Related options:
6858#
6859# * ``virt_type``: Influences which device type is used, which
6860# determines
6861# the default disk prefix.
6862# (string value)
6863#disk_prefix = <None>
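#
# Illustrative example only (commented out, the value is a placeholder):
# force the virtio 'vd' prefix when the detected default does not match
# the device naming used by your virt_type.
# disk_prefix = vd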
6864
6865# Number of seconds to wait for instance to shut down after soft
6866# reboot request is made. We fall back to hard reboot if instance does
6867# not shutdown within this window. (integer value)
6868#wait_soft_reboot_seconds = 120
6869
6870#
6871# This option is used to set the CPU mode an instance should have.
6872#
6873# If virt_type="kvm|qemu", it will default to "host-model", otherwise
6874# it will
6875# default to "none".
6876#
6877# Possible values:
6878#
6879# * ``host-model``: Clones the host CPU feature flags
6880# * ``host-passthrough``: Use the host CPU model exactly
6881# * ``custom``: Use a named CPU model
6882# * ``none``: Don't set a specific CPU model. For instances with
6883# ``virt_type`` as KVM/QEMU, the default CPU model from QEMU will be
6884# used,
6885# which provides a basic set of CPU features that are compatible with
6886# most
6887# hosts.
6888#
6889# Related options:
6890#
6891# * ``cpu_model``: This should be set ONLY when ``cpu_mode`` is set to
6892# ``custom``. Otherwise, it would result in an error and the instance
6893# launch will fail.
6894#
6895# (string value)
6896# Possible values:
6897# host-model - <No description provided>
6898# host-passthrough - <No description provided>
6899# custom - <No description provided>
6900# none - <No description provided>
6901#cpu_mode = <None>
6902cpu_mode = {{ compute.cpu_mode }}
6903
6904#
6905# Set the name of the libvirt CPU model the instance should use.
6906#
6907# Possible values:
6908#
6909# * The named CPU models listed in ``/usr/share/libvirt/cpu_map.xml``
6910#
6911# Related options:
6912#
6913# * ``cpu_mode``: This should be set to ``custom`` ONLY when you want
6914# to
6915# configure (via ``cpu_model``) a specific named CPU model.
6916# Otherwise, it
6917# would result in an error and the instance launch will fail.
6918#
6919# * ``virt_type``: Only the virtualization types ``kvm`` and ``qemu``
6920# use this.
6921# (string value)
6922#cpu_model = <None>
6923{%- if compute.get('libvirt', {}).cpu_model is defined and compute.cpu_mode == 'custom' %}
6924cpu_model = {{ compute.libvirt.cpu_model }}
6925{%- endif %}
6926
6927#
6928# This allows specifying granular CPU feature flags when specifying
6929# CPU
6930# models. For example, to explicitly specify the ``pcid``
6931# (Process-Context ID, an Intel processor feature) flag to the
6932# "IvyBridge"
6933# virtual CPU model::
6934#
6935# [libvirt]
6936# cpu_mode = custom
6937# cpu_model = IvyBridge
6938# cpu_model_extra_flags = pcid
6939#
6940# Currently, the choice is restricted to only one option: ``pcid``
6941# (the
6942# option is case-insensitive, so ``PCID`` is also valid). This flag
6943# is
6944# now required to address the guest performance degradation as a
6945# result of
6946# applying the "Meltdown" CVE fixes on certain Intel CPU models.
6947#
6948# Note that when using this config attribute to set the 'PCID' CPU
6949# flag,
6950# not all virtual (i.e. libvirt / QEMU) CPU models need it:
6951#
6952# * The only virtual CPU models that include the 'PCID' capability are
6953# Intel "Haswell", "Broadwell", and "Skylake" variants.
6954#
6955# * The libvirt / QEMU CPU models "Nehalem", "Westmere",
6956# "SandyBridge",
6957# and "IvyBridge" will _not_ expose the 'PCID' capability by
6958# default,
6959# even if the host CPUs by the same name include it. I.e. 'PCID'
6960# needs
6961# to be explicitly specified when using the said virtual CPU models.
6962#
6963# For now, the ``cpu_model_extra_flags`` config attribute is valid
6964# only in
6965# combination with ``cpu_mode`` + ``cpu_model`` options.
6966#
6967# Besides ``custom``, the libvirt driver has two other CPU modes: The
6968# default, ``host-model``, tells it to do the right thing with respect
6969# to
6970# handling 'PCID' CPU flag for the guest -- *assuming* you are running
6971# updated processor microcode, host and guest kernel, libvirt, and
6972# QEMU.
6973# The other mode, ``host-passthrough``, checks if 'PCID' is available
6974# in
6975# the hardware, and if so directly passes it through to the Nova
6976# guests.
6977# Thus, in context of 'PCID', with either of these CPU modes
6978# (``host-model`` or ``host-passthrough``), there is no need to use
6979# the
6980# ``cpu_model_extra_flags``.
6981#
6982# Related options:
6983#
6984# * cpu_mode
6985# * cpu_model
6986# (list value)
6987#cpu_model_extra_flags =
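#
# Illustrative sketch only (not part of the upstream sample): if your
# pillar model defines compute.libvirt.cpu_model_extra_flags, it could be
# rendered alongside the cpu_model block above. The pillar key name is an
# assumption; adjust or drop this block if your deployment does not
# define it.
{%- if compute.get('libvirt', {}).cpu_model_extra_flags is defined and compute.cpu_mode == 'custom' %}
cpu_model_extra_flags = {{ compute.libvirt.cpu_model_extra_flags }}
{%- endif %}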
6988
6989# Location where libvirt driver will store snapshots before uploading
6990# them to image service (string value)
6991#snapshots_directory = $instances_path/snapshots
6992
6993# Location where the Xen hvmloader is kept (string value)
6994#xen_hvmloader_path = /usr/lib/xen/boot/hvmloader
6995
6996#
6997# Specific cache modes to use for different disk types.
6998#
6999# For example: file=directsync,block=none,network=writeback
7000#
7001# For local or direct-attached storage, it is recommended that you use
7002# writethrough (default) mode, as it ensures data integrity and has
7003# acceptable
7004# I/O performance for applications running in the guest, especially
7005# for read
7006# operations. However, caching mode none is recommended for remote NFS
7007# storage,
7008# because direct I/O operations (O_DIRECT) perform better than
7009# synchronous I/O
7010# operations (with O_SYNC). Caching mode none effectively turns all
7011# guest I/O
7012# operations into direct I/O operations on the host, which is the NFS
7013# client in
7014# this environment.
7015#
7016# Possible cache modes:
7017#
7018# * default: Same as writethrough.
7019# * none: With caching mode set to none, the host page cache is
7020# disabled, but
7021# the disk write cache is enabled for the guest. In this mode, the
7022# write
7023# performance in the guest is optimal because write operations
7024# bypass the host
7025# page cache and go directly to the disk write cache. If the disk
7026# write cache
7027# is battery-backed, or if the applications or storage stack in the
7028# guest
7029# transfer data properly (either through fsync operations or file
7030# system
7031# barriers), then data integrity can be ensured. However, because
7032# the host
7033# page cache is disabled, the read performance in the guest would
7034# not be as
7035# good as in the modes where the host page cache is enabled, such as
7036# writethrough mode. Shareable disk devices, such as a multi-
7037# attachable block
7038# storage volume, will have their cache mode set to 'none'
7039# regardless of
7040# configuration.
7041# * writethrough: writethrough mode is the default caching mode. With
7042# caching set to writethrough mode, the host page cache is enabled,
7043# but the
7044# disk write cache is disabled for the guest. Consequently, this
7045# caching mode
7046# ensures data integrity even if the applications and storage stack
7047# in the
7048# guest do not transfer data to permanent storage properly (either
7049# through
7050# fsync operations or file system barriers). Because the host page
7051# cache is
7052# enabled in this mode, the read performance for applications
7053# running in the
7054# guest is generally better. However, the write performance might be
7055# reduced
7056# because the disk write cache is disabled.
7057# * writeback: With caching set to writeback mode, both the host page
7058# cache
7059# and the disk write cache are enabled for the guest. Because of
7060# this, the
7061# I/O performance for applications running in the guest is good, but
7062# the data
7063# is not protected in a power failure. As a result, this caching
7064# mode is
7065# recommended only for temporary data where potential data loss is
7066# not a
7067# concern.
7068# * directsync: Like "writethrough", but it bypasses the host page
7069# cache.
7070# * unsafe: Caching mode of unsafe ignores cache transfer operations
7071# completely. As its name implies, this caching mode should be used
7072# only for
7073# temporary data where data loss is not a concern. This mode can be
7074# useful for
7075# speeding up guest installations, but you should switch to another
7076# caching
7077# mode in production environments.
7078# (list value)
7079#disk_cachemodes =
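#
# Illustrative sketch only (not part of the upstream sample): if your
# pillar model defines compute.libvirt.disk_cachemodes (for example
# "network=writeback,block=none"), it could be rendered here in the same
# way as hw_disk_discard below. The pillar key name is an assumption.
{%- if compute.get('libvirt', {}).disk_cachemodes is defined %}
disk_cachemodes = {{ compute.libvirt.disk_cachemodes }}
{%- endif %}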
7080
7081# A path to a device that will be used as a source of entropy on the
7082# host. Permitted options are: /dev/random or /dev/hwrng (string
7083# value)
7084#rng_dev_path = <None>
7085
7086# For qemu or KVM guests, set this option to specify a default machine
7087# type per host architecture. You can find a list of supported machine
7088# types in your environment by checking the output of the "virsh
7089# capabilities" command. The format of the value for this config option
7090# is host-arch=machine-type. For example:
7091# x86_64=machinetype1,armv7l=machinetype2 (list value)
7092#hw_machine_type = <None>
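#
# Illustrative example only (commented out, the machine type is a
# placeholder): pin x86_64 guests to a machine type reported by
# "virsh capabilities" on the host.
# hw_machine_type = x86_64=q35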
7093
7094# The data source used to populate the host "serial" UUID exposed
7095# to the guest in the virtual BIOS. (string value)
7096# Possible values:
7097# none - <No description provided>
7098# os - <No description provided>
7099# hardware - <No description provided>
7100# auto - <No description provided>
7101#sysinfo_serial = auto
7102
7103# The period, in seconds, for collecting memory usage statistics. A zero
7104# or negative value disables memory usage statistics. (integer
7105# value)
7106#mem_stats_period_seconds = 10
7107
7108# List of uid targets and ranges. Syntax is guest-uid:host-
7109# uid:count. A maximum of 5 are allowed. (list value)
7110#uid_maps =
7111
7112# List of gid targets and ranges. Syntax is guest-gid:host-
7113# gid:count. A maximum of 5 are allowed. (list value)
7114#gid_maps =
7115
7116# In a realtime host context, vCPUs for the guest will run at this
7117# scheduling priority. The valid priority range depends on the host
7118# kernel (usually 1-99). (integer value)
7119#realtime_scheduler_priority = 1
7120
7121#
7122# This is a list of performance events that can be monitored. These events
7123# will be passed to the libvirt domain XML when creating new instances.
7124# Event statistics data can then be collected from libvirt. The minimum
7125# libvirt version is 2.0.0. For more information about `Performance
7126# monitoring events`, refer to
7127# https://libvirt.org/formatdomain.html#elementsPerf .
7130#
7131# Possible values:
7132# * A string list. For example: ``enabled_perf_events = cmt, mbml,
7133# mbmt``
7134# The supported events list can be found in
7135# https://libvirt.org/html/libvirt-libvirt-domain.html ,
7136# in which you may need to search for the key words ``VIR_PERF_PARAM_*``
7137# (list value)
7138#enabled_perf_events =
7139
7140#
7141# VM Images format.
7142#
7143# If default is specified, then use_cow_images flag is used instead of
7144# this
7145# one.
7146#
7147# Related options:
7148#
7149# * virt.use_cow_images
7150# * images_volume_group
7151# (string value)
7152# Possible values:
7153# raw - <No description provided>
7154# flat - <No description provided>
7155# qcow2 - <No description provided>
7156# lvm - <No description provided>
7157# rbd - <No description provided>
7158# ploop - <No description provided>
7159# default - <No description provided>
7160#images_type = default
7161
7162#
7163# LVM Volume Group that is used for VM images, when you specify
7164# images_type=lvm
7165#
7166# Related options:
7167#
7168# * images_type
7169# (string value)
7170#images_volume_group = <None>
7171
7172#
7173# Create sparse logical volumes (with virtualsize) if this flag is set
7174# to True.
7175# (boolean value)
7176#sparse_logical_volumes = false
7177
7178# The RADOS pool in which rbd volumes are stored (string value)
7179#images_rbd_pool = rbd
7180
7181# Path to the ceph configuration file to use (string value)
7182#images_rbd_ceph_conf =
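#
# Illustrative example only (commented out, values are placeholders):
# a Ceph-backed ephemeral storage setup typically combines images_type
# with the two RBD options above, for example:
# images_type = rbd
# images_rbd_pool = vms
# images_rbd_ceph_conf = /etc/ceph/ceph.conf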
7183
7184#
7185# Discard option for nova managed disks.
7186#
7187# Requires:
7188#
7189# * Libvirt >= 1.0.6
7190# * Qemu >= 1.5 (raw format)
7191# * Qemu >= 1.6 (qcow2 format)
7192# (string value)
7193# Possible values:
7194# ignore - <No description provided>
7195# unmap - <No description provided>
7196#hw_disk_discard = <None>
7197{%- if compute.libvirt.hw_disk_discard is defined %}
7198hw_disk_discard={{ compute.libvirt.hw_disk_discard }}
7199{%- endif %}
7200
7201# DEPRECATED: Allows image information files to be stored in non-
7202# standard locations (string value)
7203# This option is deprecated for removal since 14.0.0.
7204# Its value may be silently ignored in the future.
7205# Reason: Image info files are no longer used by the image cache
7206#image_info_filename_pattern = $instances_path/$image_cache_subdirectory_name/%(image)s.info
7207
7208# Unused resized base images younger than this will not be removed
7209# (integer value)
7210#remove_unused_resized_minimum_age_seconds = 3600
7211
7212# DEPRECATED: Write a checksum for files in _base to disk (boolean
7213# value)
7214# This option is deprecated for removal since 14.0.0.
7215# Its value may be silently ignored in the future.
7216# Reason: The image cache no longer periodically calculates checksums
7217# of stored images. Data integrity can be checked at the block or
7218# filesystem level.
7219#checksum_base_images = false
7220
7221# DEPRECATED: How frequently to checksum base images (integer value)
7222# This option is deprecated for removal since 14.0.0.
7223# Its value may be silently ignored in the future.
7224# Reason: The image cache no longer periodically calculates checksums
7225# of stored images. Data integrity can be checked at the block or
7226# filesystem level.
7227#checksum_interval_seconds = 3600
7228
7229#
7230# Method used to wipe ephemeral disks when they are deleted. Only
7231# takes effect
7232# if LVM is set as backing storage.
7233#
7234# Possible values:
7235#
7236# * none - do not wipe deleted volumes
7237# * zero - overwrite volumes with zeroes
7238# * shred - overwrite volume repeatedly
7239#
7240# Related options:
7241#
7242# * images_type - must be set to ``lvm``
7243# * volume_clear_size
7244# (string value)
7245# Possible values:
7246# none - <No description provided>
7247# zero - <No description provided>
7248# shred - <No description provided>
7249#volume_clear = zero
7250
7251#
7252# Size of area in MiB, counting from the beginning of the allocated
7253# volume,
7254# that will be cleared using method set in ``volume_clear`` option.
7255#
7256# Possible values:
7257#
7258# * 0 - clear whole volume
7259# * >0 - clear specified amount of MiB
7260#
7261# Related options:
7262#
7263# * images_type - must be set to ``lvm``
7264# * volume_clear - must be set and the value must be different than
7265# ``none``
7266# for this option to have any impact
7267# (integer value)
7268# Minimum value: 0
7269#volume_clear_size = 0
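#
# Illustrative example only (commented out, values are placeholders):
# with LVM-backed instances, wipe only the first 100 MiB of each deleted
# ephemeral volume instead of the whole volume.
# images_type = lvm
# volume_clear = zero
# volume_clear_size = 100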
7270
7271#
7272# Enable snapshot compression for ``qcow2`` images.
7273#
7274# Note: you can set ``snapshot_image_format`` to ``qcow2`` to force
7275# all
7276# snapshots to be in ``qcow2`` format, independently from their
7277# original image
7278# type.
7279#
7280# Related options:
7281#
7282# * snapshot_image_format
7283# (boolean value)
7284#snapshot_compression = false
7285
7286# Use virtio for bridge interfaces with KVM/QEMU (boolean value)
7287#use_virtio_for_bridges = true
7288
7289#
7290# Use multipath connection of the iSCSI or FC volume
7291#
7292# Volumes can be connected in the LibVirt as multipath devices. This
7293# will
7294# provide high availability and fault tolerance.
7295# (boolean value)
7296# Deprecated group/name - [libvirt]/iscsi_use_multipath
7297#volume_use_multipath = false
7298
7299#
7300# Number of times to scan given storage protocol to find volume.
7301# (integer value)
7302# Deprecated group/name - [libvirt]/num_iscsi_scan_tries
7303#num_volume_scan_tries = 5
7304
7305#
7306# Number of times to rediscover AoE target to find volume.
7307#
7308# Nova provides support for block storage attaching to hosts via AOE
7309# (ATA over
7310# Ethernet). This option allows the user to specify the maximum number
7311# of retry
7312# attempts that can be made to discover the AoE device.
7313# (integer value)
7314#num_aoe_discover_tries = 3
7315
7316#
7317# The iSCSI transport iface to use to connect to target in case
7318# offload support
7319# is desired.
7320#
7321# Default format is of the form <transport_name>.<hwaddress> where
7322# <transport_name> is one of (be2iscsi, bnx2i, cxgb3i, cxgb4i,
7323# qla4xxx, ocs) and
7324# <hwaddress> is the MAC address of the interface and can be generated
7325# via the
7326# iscsiadm -m iface command. Do not confuse the iscsi_iface parameter
7327# to be
7328# provided here with the actual transport name.
7329# (string value)
7330# Deprecated group/name - [libvirt]/iscsi_transport
7331#iscsi_iface = <None>
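#
# Illustrative example only (commented out, the MAC address is a
# placeholder): offload iSCSI to a bnx2i interface, using the address
# reported by the "iscsiadm -m iface" command for that transport.
# iscsi_iface = bnx2i.00:05:b5:d2:a0:c2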
7332
7333#
7334# Number of times to scan iSER target to find volume.
7335#
7336# iSER is a server network protocol that extends iSCSI protocol to use
7337# Remote
7338# Direct Memory Access (RDMA). This option allows the user to specify
7339# the maximum
7340# number of scan attempts that can be made to find iSER volume.
7341# (integer value)
7342#num_iser_scan_tries = 5
7343
7344#
7345# Use multipath connection of the iSER volume.
7346#
7347# iSER volumes can be connected as multipath devices. This will
7348# provide high
7349# availability and fault tolerance.
7350# (boolean value)
7351#iser_use_multipath = false
7352
7353#
7354# The RADOS client name for accessing rbd (RADOS Block Devices)
7355# volumes.
7356#
7357# Libvirt will refer to this user when connecting and authenticating
7358# with
7359# the Ceph RBD server.
7360# (string value)
7361#rbd_user = <None>
7362
7363#
7364# The libvirt UUID of the secret for the rbd_user volumes.
7365# (string value)
7366#rbd_secret_uuid = <None>
7367
7368#
7369# Directory where the NFS volume is mounted on the compute node.
7370# The default is the 'mnt' directory of the location where nova's Python
7371# module
7372# is installed.
7373#
7374# NFS provides shared storage for the OpenStack Block Storage service.
7375#
7376# Possible values:
7377#
7378# * A string representing absolute path of mount point.
7379# (string value)
7380#nfs_mount_point_base = $state_path/mnt
7381
7382#
7383# Mount options passed to the NFS client. See the nfs man page
7384# for details.
7386#
7387# Mount options controls the way the filesystem is mounted and how the
7388# NFS client behaves when accessing files on this mount point.
7389#
7390# Possible values:
7391#
7392# * Any string representing mount options separated by commas.
7393# * Example string: vers=3,lookupcache=pos
7394# (string value)
7395{%- if compute.nfs_mount_options is defined %}
7396nfs_mount_options="{{ compute.nfs_mount_options }}"
7397{%- endif %}
7398
7399#
7400# Directory where the Quobyte volume is mounted on the compute node.
7401#
7402# Nova supports Quobyte volume driver that enables storing Block
7403# Storage
7404# service volumes on a Quobyte storage back end. This option specifies
7405# the
7406# path of the directory where Quobyte volume is mounted.
7407#
7408# Possible values:
7409#
7410# * A string representing absolute path of mount point.
7411# (string value)
7412#quobyte_mount_point_base = $state_path/mnt
7413
7414# Path to a Quobyte Client configuration file. (string value)
7415#quobyte_client_cfg = <None>
7416
7417#
7418# Directory where the SMBFS shares are mounted on the compute node.
7419# (string value)
7420#smbfs_mount_point_base = $state_path/mnt
7421
7422#
7423# Mount options passed to the SMBFS client.
7424#
7425# Provide SMBFS options as a single string containing all parameters.
7426# See mount.cifs man page for details. Note that the libvirt-qemu
7427# ``uid``
7428# and ``gid`` must be specified.
7429# (string value)
7430#smbfs_mount_options =
7431
7432#
7433# libvirt's transport method for remote file operations.
7434#
7435# Because libvirt cannot use RPC to copy files over the network to/from
7436# other
7437# compute nodes, another method must be used for:
7438#
7439# * creating directory on remote host
7440# * creating file on remote host
7441# * removing file from remote host
7442# * copying file to remote host
7443# (string value)
7444# Possible values:
7445# ssh - <No description provided>
7446# rsync - <No description provided>
7447#remote_filesystem_transport = ssh
7448
7449#
7450# Directory where the Virtuozzo Storage clusters are mounted on the
7451# compute
7452# node.
7453#
7454# This option defines non-standard mountpoint for Vzstorage cluster.
7455#
7456# Related options:
7457#
7458# * vzstorage_mount_* group of parameters
7459# (string value)
7460#vzstorage_mount_point_base = $state_path/mnt
7461
7462#
7463# Mount owner user name.
7464#
7465# This option defines the owner user of Vzstorage cluster mountpoint.
7466#
7467# Related options:
7468#
7469# * vzstorage_mount_* group of parameters
7470# (string value)
7471#vzstorage_mount_user = stack
7472
7473#
7474# Mount owner group name.
7475#
7476# This option defines the owner group of Vzstorage cluster mountpoint.
7477#
7478# Related options:
7479#
7480# * vzstorage_mount_* group of parameters
7481# (string value)
7482#vzstorage_mount_group = qemu
7483
7484#
7485# Mount access mode.
7486#
7487# This option defines the access bits of Vzstorage cluster mountpoint,
7488# in the format similar to one of chmod(1) utility, like this: 0770.
7489# It consists of one to four digits ranging from 0 to 7, with missing
7490# lead digits assumed to be 0's.
7491#
7492# Related options:
7493#
7494# * vzstorage_mount_* group of parameters
7495# (string value)
7496#vzstorage_mount_perms = 0770
7497
7498#
7499# Path to vzstorage client log.
7500#
7501# This option defines the log of cluster operations;
7502# it should include the "%(cluster_name)s" template to separate
7503# logs from multiple shares.
7504#
7505# Related options:
7506#
7507# * vzstorage_mount_opts may include more detailed logging options.
7508# (string value)
7509#vzstorage_log_path = /var/log/vstorage/%(cluster_name)s/nova.log.gz
7510
7511#
7512# Path to the SSD cache file.
7513#
7514# You can attach an SSD drive to a client and configure the drive to
7515# store
7516# a local cache of frequently accessed data. By having a local cache
7517# on a
7518# client's SSD drive, you can increase the overall cluster performance
7519# by up to 10 times or more.
7520# WARNING! There are many SSD models which are not server grade and
7521# may lose an arbitrary set of data changes on power loss.
7522# Such SSDs should not be used in Vstorage and are dangerous as they
7523# may lead to data corruption and inconsistencies. Please consult the
7526# manual
7527# on which SSD models are known to be safe or verify it using
7528# vstorage-hwflush-check(1) utility.
7529#
7530# This option defines the path which should include "%(cluster_name)s"
7531# template to separate caches from multiple shares.
7532#
7533# Related options:
7534#
7535# * vzstorage_mount_opts may include more detailed cache options.
7536# (string value)
7537#vzstorage_cache_path = <None>
7538
7539#
7540# Extra mount options for pstorage-mount
7541#
7542# For full description of them, see
7543# https://static.openvz.org/vz-man/man1/pstorage-mount.1.gz.html
7544# Format is a python string representation of arguments list, like:
7545# "['-v', '-R', '500']"
7546# Shouldn't include -c, -l, -C, -u, -g and -m as those have
7547# explicit vzstorage_* options.
7548#
7549# Related options:
7550#
7551# * All other vzstorage_* options
7552# (list value)
7553#vzstorage_mount_opts =
7554
7555
7556[metrics]
7557#
7558# Configuration options for metrics
7559#
7560# Options under this group allow you to adjust how values assigned to
7561# metrics are
7562# calculated.
7563
7564#
7565# From nova.conf
7566#
7567
7568#
7569# When using metrics to weight the suitability of a host, you can use
7570# this option
7571# to change how the calculated weight influences the weight assigned
7572# to a host as
7573# follows:
7574#
7575# * >1.0: increases the effect of the metric on overall weight
7576# * 1.0: no change to the calculated weight
7577# * >0.0,<1.0: reduces the effect of the metric on overall weight
7578# * 0.0: the metric value is ignored, and the value of the
7579# 'weight_of_unavailable' option is returned instead
7580# * >-1.0,<0.0: the effect is reduced and reversed
7581# * -1.0: the effect is reversed
7582# * <-1.0: the effect is increased proportionally and reversed
7583#
7584# This option is only used by the FilterScheduler and its subclasses;
7585# if you use
7586# a different scheduler, this option has no effect.
7587#
7588# Possible values:
7589#
7590# * An integer or float value, where the value corresponds to the
7591# multiplier
7592# ratio for this weigher.
7593#
7594# Related options:
7595#
7596# * weight_of_unavailable
7597# (floating point value)
7598#weight_multiplier = 1.0
7599
7600#
7601# This setting specifies the metrics to be weighed and the relative
7602# ratios for
7603# each metric. This should be a single string value, consisting of a
7604# series of
7605# one or more 'name=ratio' pairs, separated by commas, where 'name' is
7606# the name
7607# of the metric to be weighed, and 'ratio' is the relative weight for
7608# that
7609# metric.
7610#
7611# Note that if the ratio is set to 0, the metric value is ignored, and
7612# instead
7613# the weight will be set to the value of the 'weight_of_unavailable'
7614# option.
7615#
7616# As an example, let's consider the case where this option is set to:
7617#
7618# ``name1=1.0, name2=-1.3``
7619#
7620# The final weight will be:
7621#
7622# ``(name1.value * 1.0) + (name2.value * -1.3)``
7623#
7624# This option is only used by the FilterScheduler and its subclasses;
7625# if you use
7626# a different scheduler, this option has no effect.
7627#
7628# Possible values:
7629#
7630# * A list of zero or more key/value pairs separated by commas, where
7631# the key is
7632# a string representing the name of a metric and the value is a
7633# numeric weight
7634# for that metric. If any value is set to 0, the value is ignored
7635# and the
7636# weight will be set to the value of the 'weight_of_unavailable'
7637# option.
7638#
7639# Related options:
7640#
7641# * weight_of_unavailable
7642# (list value)
7643#weight_setting =
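#
# Illustrative example only (commented out): weigh the compute monitor's
# cpu.frequency metric positively while halving its overall influence.
# The metric name assumes the CPU monitor is enabled on the compute
# nodes; substitute whichever metrics your deployment actually collects.
# weight_multiplier = 0.5
# weight_setting = cpu.frequency=1.0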
7644
7645#
7646# This setting determines how any unavailable metrics are treated. If
7647# this option
7648# is set to True, any hosts for which a metric is unavailable will
7649# raise an
7650# exception, so it is recommended to also use the MetricFilter to
7651# filter out
7652# those hosts before weighing.
7653#
7654# This option is only used by the FilterScheduler and its subclasses;
7655# if you use
7656# a different scheduler, this option has no effect.
7657#
7658# Possible values:
7659#
7660# * True or False, where False ensures any metric being unavailable
7661# for a host
7662# will set the host weight to 'weight_of_unavailable'.
7663#
7664# Related options:
7665#
7666# * weight_of_unavailable
7667# (boolean value)
7668#required = true
7669
7670#
7671# When any of the following conditions are met, this value will be
7672# used in place
7673# of any actual metric value:
7674#
7675# * One of the metrics named in 'weight_setting' is not available for
7676# a host,
7677# and the value of 'required' is False
7678# * The ratio specified for a metric in 'weight_setting' is 0
7679# * The 'weight_multiplier' option is set to 0
7680#
7681# This option is only used by the FilterScheduler and its subclasses;
7682# if you use
7683# a different scheduler, this option has no effect.
7684#
7685# Possible values:
7686#
7687# * An integer or float value, where the value corresponds to the
7688# multiplier
7689# ratio for this weigher.
7690#
7691# Related options:
7692#
7693# * weight_setting
7694# * required
7695# * weight_multiplier
7696# (floating point value)
7697#weight_of_unavailable = -10000.0
7698
7699
7700[mks]
7701#
7702# The Nova compute node uses WebMKS, a desktop sharing protocol, to provide
7703# instance console access to VMs created by VMware hypervisors.
7704#
7705# Related options:
7706# The following options must be set to provide console access.
7707# * mksproxy_base_url
7708# * enabled
7709
7710#
7711# From nova.conf
7712#
7713
7714#
7715# Location of MKS web console proxy
7716#
7717# The URL in the response points to a WebMKS proxy which
7718# starts proxying between the client and the corresponding vCenter
7719# server where the instance runs. In order to use web-based
7720# console access, a WebMKS proxy should be installed and configured.
7721#
7722# Possible values:
7723#
7724# * Must be a valid URL of the form:``http://host:port/`` or
7725# ``https://host:port/``
7726# (uri value)
7727#mksproxy_base_url = http://127.0.0.1:6090/
7728
7729#
7730# Enables graphical console access for virtual machines.
7731# (boolean value)
7732#enabled = false
7733
7734
7735[neutron]
7736#
7737# Configuration options for neutron (network connectivity as a
7738# service).
7739
7740#
7741# From nova.conf
7742#
7743
7744# DEPRECATED:
7745# This option specifies the URL for connecting to Neutron.
7746#
7747# Possible values:
7748#
7749# * Any valid URL that points to the Neutron API service is
7750# appropriate here.
7751# This typically matches the URL returned for the 'network' service
7752# type
7753# from the Keystone service catalog.
7754# (uri value)
7755# This option is deprecated for removal since 17.0.0.
7756# Its value may be silently ignored in the future.
7757# Reason: Endpoint lookup uses the service catalog via common
7758# keystoneauth1 Adapter configuration options. In the current release,
7759# "url" will override this behavior, but will be ignored and/or
7760# removed in a future release. To achieve the same result, use the
7761# endpoint_override option instead.
7762#url = http://127.0.0.1:9696
7763
7764#
7765# Default name for the Open vSwitch integration bridge.
7766#
7767# Specifies the name of an integration bridge interface used by
7768# OpenvSwitch.
7769# This option is only used if Neutron does not specify the OVS bridge
7770# name in
7771# port binding responses.
7772# (string value)
7773#ovs_bridge = br-int
7774
7775#
7776# Default name for the floating IP pool.
7777#
7778# Specifies the name of the floating IP pool used for allocating floating
7779# IPs. This
7780# option is only used if Neutron does not specify the floating IP pool
7781# name in
7782# port binding responses.
7783# (string value)
7784#default_floating_pool = nova
7785
7786#
7787# Integer value representing the number of seconds to wait before
7788# querying
7789# Neutron for extensions. After this number of seconds the next time
7790# Nova
7791# needs to create a resource in Neutron it will requery Neutron for
7792# the
7793# extensions that it has loaded. Setting the value to 0 will refresh the
7794# extensions with no wait.
7795# (integer value)
7796# Minimum value: 0
7797#extension_sync_interval = 600
7798extension_sync_interval={{ compute.network.get('extension_sync_interval', '600') }}
7799
7800#
7801# When set to True, this option indicates that Neutron will be used to
7802# proxy
7803# metadata requests and resolve instance ids. Otherwise, the instance
7804# ID must be
7805# passed to the metadata request in the 'X-Instance-ID' header.
7806#
7807# Related options:
7808#
7809# * metadata_proxy_shared_secret
7810# (boolean value)
7811#service_metadata_proxy = false
7812
7813#
7814# This option holds the shared secret string used to validate proxied
7815# Neutron metadata requests. In order to be used, the
7817# 'X-Metadata-Provider-Signature' header must be supplied in the
7818# request.
7819#
7820# Related options:
7821#
7822# * service_metadata_proxy
7823# (string value)
7824#metadata_proxy_shared_secret =
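#
# Illustrative example only (commented out, the secret is a placeholder):
# when Neutron proxies metadata requests, the same shared secret must also
# be configured on the Neutron metadata agent side.
# service_metadata_proxy = true
# metadata_proxy_shared_secret = s3cr3t-shared-with-neutron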
7825
7826# PEM encoded Certificate Authority to use when verifying HTTPS
7827# connections. (string value)
7828#cafile = <None>
7829{%- if compute.network.get('protocol', 'http') == 'https' %}
7830cafile={{ compute.network.get('cacert_file', compute.cacert_file) }}
7831{%- endif %}
7832
7833# PEM encoded client certificate cert file (string value)
7834#certfile = <None>
7835
7836# PEM encoded client certificate key file (string value)
7837#keyfile = <None>
7838
7839# Verify HTTPS connections. (boolean value)
7840#insecure = false
7841
7842# Timeout value for http requests (integer value)
7843#timeout = <None>
7844timeout=300
7845
7846# Authentication type to load (string value)
7847# Deprecated group/name - [neutron]/auth_plugin
7848#auth_type = <None>
7849auth_type = v3password
7850
7851# Config Section from which to load plugin specific options (string
7852# value)
7853#auth_section = <None>
7854
7855# Authentication URL (string value)
7856#auth_url = <None>
7857auth_url = {{ compute.identity.get('protocol', 'http') }}://{{ compute.identity.host }}:{{ compute.identity.port }}/v3
7858
7859# Scope for system operations (string value)
7860#system_scope = <None>
7861
7862# Domain ID to scope to (string value)
7863#domain_id = <None>
7864
7865# Domain name to scope to (string value)
7866#domain_name = <None>
7867
7868# Project ID to scope to (string value)
7869#project_id = <None>
7870
7871# Project name to scope to (string value)
7872#project_name = <None>
7873project_name={{ compute.identity.tenant }}
7874
7875# Domain ID containing project (string value)
7876#project_domain_id = <None>
7877
7878# Domain name containing project (string value)
7879#project_domain_name = <None>
7880project_domain_name = {{ compute.get('project_domain_name', 'Default') }}
7881
7882# Trust ID (string value)
7883#trust_id = <None>
7884
7885# Optional domain ID to use with v3 and v2 parameters. It will be used
7886# for both the user and project domain in v3 and ignored in v2
7887# authentication. (string value)
7888#default_domain_id = <None>
7889
7890# Optional domain name to use with v3 API and v2 parameters. It will
7891# be used for both the user and project domain in v3 and ignored in v2
7892# authentication. (string value)
7893#default_domain_name = <None>
7894
7895# User ID (string value)
7896#user_id = <None>
7897
7898# Username (string value)
7899# Deprecated group/name - [neutron]/user_name
7900#username = <None>
7901username={{ compute.network.user }}
7902
7903# User's domain id (string value)
7904#user_domain_id = <None>
7905
7906# User's domain name (string value)
7907#user_domain_name = <None>
7908user_domain_name = {{ compute.get('user_domain_name', 'Default') }}
7909
7910# User's password (string value)
7911#password = <None>
7912password={{ compute.network.password }}
7913
7914# Tenant ID (string value)
7915#tenant_id = <None>
7916
7917# Tenant Name (string value)
7918#tenant_name = <None>
7919
7920# The default service_type for endpoint URL discovery. (string value)
7921#service_type = network
7922
7923# The default service_name for endpoint URL discovery. (string value)
7924#service_name = <None>
7925
7926# List of interfaces, in order of preference, for endpoint URL. (list
7927# value)
7928#valid_interfaces = internal,public
7929
7930# The default region_name for endpoint URL discovery. (string value)
7931#region_name = <None>
7932region_name= {{ compute.network.region }}
7933
7934# Always use this endpoint URL for requests for this client. NOTE: The
7935# unversioned endpoint should be specified here; to request a
7936# particular API version, use the `version`, `min-version`, and/or
7937# `max-version` options. (string value)
7938#endpoint_override = <None>
7939
7940
7941[notifications]
7942#
7943# Most of the actions in Nova which manipulate the system state
7944# generate
7945# notifications which are posted to the messaging component (e.g.
7946# RabbitMQ) and
7947# can be consumed by any service outside of OpenStack. More technical
7948# details
7949# at
7950# https://docs.openstack.org/nova/latest/reference/notifications.html
7951
7952#
7953# From nova.conf
7954#
7955
7956#
7957# If set, send compute.instance.update notifications on
7958# instance state changes.
7959#
7960# Please refer to
7961# https://docs.openstack.org/nova/latest/reference/notifications.html
7962# for
7963# additional information on notifications.
7964#
7965# Possible values:
7966#
7967# * None - no notifications
7968# * "vm_state" - notifications are sent with VM state transition
7969# information in
7970# the ``old_state`` and ``state`` fields. The ``old_task_state`` and
7971# ``new_task_state`` fields will be set to the current task_state of
7972# the
7973# instance.
7974# * "vm_and_task_state" - notifications are sent with VM and task
7975# state
7976# transition information.
7977# (string value)
7978# Possible values:
7979# <None> - <No description provided>
7980# vm_state - <No description provided>
7981# vm_and_task_state - <No description provided>
7982#notify_on_state_change = <None>
7983{%- if compute.get('notification', {}).notify_on is defined %}
7984{%- for key, value in compute.notification.notify_on.iteritems() %}
7985notify_on_{{ key }} = {{ value }}
7986{%- endfor %}
7987{%- elif pillar.ceilometer is defined %}
7988notify_on_state_change = vm_and_task_state
7989{%- endif %}
7990
7991# Default notification level for outgoing notifications. (string
7992# value)
7993# Possible values:
7994# DEBUG - <No description provided>
7995# INFO - <No description provided>
7996# WARN - <No description provided>
7997# ERROR - <No description provided>
7998# CRITICAL - <No description provided>
7999# Deprecated group/name - [DEFAULT]/default_notification_level
8000#default_level = INFO
8001
8002# DEPRECATED:
8003# Default publisher_id for outgoing notifications. If you consider
8004# routing
8005# notifications using different publisher, change this value
8006# accordingly.
8007#
8008# Possible values:
8009#
8010# * Defaults to the current hostname of this host, but it can be any
8011# valid
8012# oslo.messaging publisher_id
8013#
8014# Related options:
8015#
8016# * host - Hostname, FQDN or IP address of this host.
8017# (string value)
8018# This option is deprecated for removal since 17.0.0.
8019# Its value may be silently ignored in the future.
8020# Reason:
8021# This option is only used when ``monkey_patch=True`` and
8022# ``monkey_patch_modules`` is configured to specify the legacy
8023# notify_decorator.
8024# Since the monkey_patch and monkey_patch_modules options are
8025# deprecated, this
8026# option is also deprecated.
8027#default_publisher_id = $host
8028
8029#
8030# Specifies which notification format shall be used by nova.
8031#
8032# The default value is fine for most deployments and rarely needs to
8033# be changed.
8034# This value can be set to 'versioned' once the infrastructure moves
8035# closer to
8036# consuming the newer format of notifications. After this occurs, this
8037# option
8038# will be removed.
8039#
8040# Note that notifications can be completely disabled by setting
8041# ``driver=noop``
8042# in the ``[oslo_messaging_notifications]`` group.
8043#
8044# Possible values:
8045# * unversioned: Only the legacy unversioned notifications are
8046# emitted.
8047# * versioned: Only the new versioned notifications are emitted.
8048# * both: Both the legacy unversioned and the new versioned
8049# notifications are
8050# emitted. (Default)
8051#
8052# The list of versioned notifications is visible in
8053# https://docs.openstack.org/nova/latest/reference/notifications.html
8054# (string value)
8055# Possible values:
8056# unversioned - <No description provided>
8057# versioned - <No description provided>
8058# both - <No description provided>
8059#notification_format = both
8060
8061#
8062# Specifies the topics for the versioned notifications issued by nova.
8063#
8064# The default value is fine for most deployments and rarely needs to
8065# be changed.
8066# However, if you have a third-party service that consumes versioned
8067# notifications, it might be worth getting a topic for that service.
8068# Nova will send a message containing a versioned notification payload
8069# to each
8070# topic queue in this list.
8071#
8072# The list of versioned notifications is visible in
8073# https://docs.openstack.org/nova/latest/reference/notifications.html
8074# (list value)
8075#versioned_notifications_topics = versioned_notifications
8076
8077#
8078# If enabled, include block device information in the versioned
8079# notification
8080# payload. Sending block device information is disabled by default as
8081# providing
8082# that information can incur some overhead on the system since the
8083# information
8084# may need to be loaded from the database.
8085# (boolean value)
8086#bdms_in_notifications = false
8087
8088
8089[osapi_v21]
8090
8091#
8092# From nova.conf
8093#
8094
8095# DEPRECATED:
8096# This option is a string representing a regular expression (regex)
8097# that matches
8098# the project_id as contained in URLs. If not set, it will match
8099# normal UUIDs
8100# created by keystone.
8101#
8102# Possible values:
8103#
8104# * A string representing any legal regular expression
8105# (string value)
8106# This option is deprecated for removal since 13.0.0.
8107# Its value may be silently ignored in the future.
8108# Reason:
8109# Recent versions of nova constrain project IDs to hexadecimal
8110# characters and
8111# dashes. If your installation uses IDs outside of this range, you
8112# should use
8113# this option to provide your own regex and give you time to migrate
8114# offending
8115# projects to valid IDs before the next release.
8116#project_id_regex = <None>
8117
8118
8119[pci]
8120
8121#
8122# From nova.conf
8123#
8124
8125#
8126# An alias for a PCI passthrough device requirement.
8127#
8128# This allows users to specify the alias in the extra specs for a
8129# flavor, without
8130# needing to repeat all the PCI property requirements.
8131#
8132# Possible Values:
8133#
8134# * A list of JSON values which describe the aliases. For example::
8135#
8136# alias = {
8137# "name": "QuickAssist",
8138# "product_id": "0443",
8139# "vendor_id": "8086",
8140# "device_type": "type-PCI",
8141# "numa_policy": "required"
8142# }
8143#
8144# This defines an alias for the Intel QuickAssist card. (multi
8145# valued). Valid
8146# key values are :
8147#
8148# ``name``
8149# Name of the PCI alias.
8150#
8151# ``product_id``
8152# Product ID of the device in hexadecimal.
8153#
8154# ``vendor_id``
8155# Vendor ID of the device in hexadecimal.
8156#
8157# ``device_type``
8158# Type of PCI device. Valid values are: ``type-PCI``, ``type-PF``
8159# and
8160# ``type-VF``.
8161#
8162# ``numa_policy``
8163# Required NUMA affinity of device. Valid values are: ``legacy``,
8164# ``preferred`` and ``required``.
8165# (multi valued)
8166# Deprecated group/name - [DEFAULT]/pci_alias
8167#alias =
8168
8169#
8170# White list of PCI devices available to VMs.
8171#
8172# Possible values:
8173#
8174# * A JSON dictionary which describe a whitelisted PCI device. It
8175# should take
8176# the following format:
8177#
8178# ["vendor_id": "<id>",] ["product_id": "<id>",]
8179# ["address": "[[[[<domain>]:]<bus>]:][<slot>][.[<function>]]" |
8180# "devname": "<name>",]
8181# {"<tag>": "<tag_value>",}
8182#
8183# Where '[' indicates zero or one occurrences, '{' indicates zero or
8184# multiple
8185# occurrences, and '|' indicates mutually exclusive options. Note that any
8186# missing
8187# fields are automatically wildcarded.
8188#
8189# Valid key values are :
8190#
8191# * "vendor_id": Vendor ID of the device in hexadecimal.
8192# * "product_id": Product ID of the device in hexadecimal.
8193# * "address": PCI address of the device.
8194# * "devname": Device name of the device (for e.g. interface name).
8195# Not all
8196# PCI devices have a name.
8197# * "<tag>": Additional <tag> and <tag_value> used for matching PCI
8198# devices.
8199# Supported <tag>: "physical_network".
8200#
8201# The address key supports traditional glob style and regular
8202# expression
8203# syntax. Valid examples are:
8204#
8205# passthrough_whitelist = {"devname":"eth0",
8206# "physical_network":"physnet"}
8207# passthrough_whitelist = {"address":"*:0a:00.*"}
8208# passthrough_whitelist = {"address":":0a:00.",
8209# "physical_network":"physnet1"}
8210# passthrough_whitelist = {"vendor_id":"1137",
8211# "product_id":"0071"}
8212# passthrough_whitelist = {"vendor_id":"1137",
8213# "product_id":"0071",
8214# "address": "0000:0a:00.1",
8215# "physical_network":"physnet1"}
8216# passthrough_whitelist = {"address":{"domain": ".*",
8217# "bus": "02", "slot": "01",
8218# "function": "[2-7]"},
8219# "physical_network":"physnet1"}
8220# passthrough_whitelist = {"address":{"domain": ".*",
8221# "bus": "02", "slot":
8222# "0[1-2]",
8223# "function": ".*"},
8224# "physical_network":"physnet1"}
8225#
8226# The following are invalid, as they specify mutually exclusive
8227# options:
8228#
8229# passthrough_whitelist = {"devname":"eth0",
8230# "physical_network":"physnet",
8231# "address":"*:0a:00.*"}
8232#
8233# * A JSON list of JSON dictionaries corresponding to the above
8234# format. For
8235# example:
8236#
8237# passthrough_whitelist = [{"product_id":"0001",
8238# "vendor_id":"8086"},
8239# {"product_id":"0002",
8240# "vendor_id":"8086"}]
8241# (multi valued)
8242# Deprecated group/name - [DEFAULT]/pci_passthrough_whitelist
8243#passthrough_whitelist =
8244{%- if compute.get('sriov', false) %}
8245{%- for nic_name, sriov in compute.sriov.iteritems() %}
8246passthrough_whitelist = {"devname":"{{ sriov.devname }}","physical_network":"{{ sriov.physical_network }}"}
8247{%- endfor %}
8248{%- endif %}
8249
8250[placement]
8251
8252#
8253# From nova.conf
8254#
8255
8256# DEPRECATED:
8257# Region name of this node. This is used when picking the URL in the
8258# service
8259# catalog.
8260#
8261# Possible values:
8262#
8263# * Any string representing region name
8264# (string value)
8265# This option is deprecated for removal since 17.0.0.
8266# Its value may be silently ignored in the future.
8267# Reason: Endpoint lookup uses the service catalog via common
8268# keystoneauth1 Adapter configuration options. Use the region_name
8269# option instead.
8270os_region_name = {{ compute.identity.region }}
8271
8272# DEPRECATED:
8273# Endpoint interface for this node. This is used when picking the URL
8274# in the
8275# service catalog.
8276# (string value)
8277# This option is deprecated for removal since 17.0.0.
8278# Its value may be silently ignored in the future.
8279# Reason: Endpoint lookup uses the service catalog via common
8280# keystoneauth1 Adapter configuration options. Use the
8281# valid_interfaces option instead.
8282#os_interface = <None>
8283
8284#
8285# If True, when limiting allocation candidate results, the results
8286# will be
8287# a random sampling of the full result set. If False, allocation
8288# candidates
8289# are returned in a deterministic but undefined order. That is, all
8290# things
8291# being equal, two requests for allocation candidates will return the
8292# same
8293# results in the same order; but no guarantees are made as to how that
8294# order
8295# is determined.
8296# (boolean value)
8297#randomize_allocation_candidates = false
8298
8299# PEM encoded Certificate Authority to use when verifying HTTPS
8300# connections. (string value)
8301#cafile = <None>
8302{%- if compute.identity.get('protocol', 'http') == 'https' %}
8303cafile={{ compute.identity.get('cacert_file', compute.cacert_file) }}
8304{%- endif %}
8305
8306# PEM encoded client certificate cert file (string value)
8307#certfile = <None>
8308
8309# PEM encoded client certificate key file (string value)
8310#keyfile = <None>
8311
8312# Verify HTTPS connections. (boolean value)
8313#insecure = false
8314
8315# Timeout value for http requests (integer value)
8316#timeout = <None>
8317
8318# Authentication type to load (string value)
8319# Deprecated group/name - [placement]/auth_plugin
8320auth_type = password
8321
8322# Config Section from which to load plugin specific options (string
8323# value)
8324#auth_section = <None>
8325
8326# Authentication URL (string value)
8327#auth_url = <None>
8328auth_url={{ compute.identity.get('protocol', 'http') }}://{{ compute.identity.host }}:35357/v3
8329
8330# Scope for system operations (string value)
8331#system_scope = <None>
8332
8333# Domain ID to scope to (string value)
8334#domain_id = <None>
8335
8336# Domain name to scope to (string value)
8337#domain_name = <None>
8338
8339# Project ID to scope to (string value)
8340#project_id = <None>
8341
8342# Project name to scope to (string value)
8343project_name = {{ compute.identity.tenant }}
8344
8345# Domain ID containing project (string value)
8346project_domain_id = {{ compute.identity.get('domain', 'default') }}
8347
8348# Domain name containing project (string value)
8349#project_domain_name = <None>
8350
8351# Trust ID (string value)
8352#trust_id = <None>
8353
8354# Optional domain ID to use with v3 and v2 parameters. It will be used
8355# for both the user and project domain in v3 and ignored in v2
8356# authentication. (string value)
8357#default_domain_id = <None>
8358
8359# Optional domain name to use with v3 API and v2 parameters. It will
8360# be used for both the user and project domain in v3 and ignored in v2
8361# authentication. (string value)
8362#default_domain_name = <None>
8363
8364# User ID (string value)
8365#user_id = <None>
8366
8367# Username (string value)
8368# Deprecated group/name - [placement]/user_name
8369username = {{ compute.identity.user }}
8370
8371# User's domain id (string value)
8372user_domain_id = {{ compute.identity.get('domain', 'default') }}
8373
8374# User's domain name (string value)
8375#user_domain_name = <None>
8376
8377# User's password (string value)
8378password = {{ compute.identity.password }}
8379
8380# Tenant ID (string value)
8381#tenant_id = <None>
8382
8383# Tenant Name (string value)
8384#tenant_name = <None>
8385
8386# The default service_type for endpoint URL discovery. (string value)
8387#service_type = placement
8388
8389# The default service_name for endpoint URL discovery. (string value)
8390#service_name = <None>
8391
8392# List of interfaces, in order of preference, for endpoint URL. (list
8393# value)
8394# Deprecated group/name - [placement]/os_interface
8395valid_interfaces = internal
8396
8397# The default region_name for endpoint URL discovery. (string value)
8398# Deprecated group/name - [placement]/os_region_name
8399#region_name = <None>
8400
8401# Always use this endpoint URL for requests for this client. NOTE: The
8402# unversioned endpoint should be specified here; to request a
8403# particular API version, use the `version`, `min-version`, and/or
8404# `max-version` options. (string value)
8405#endpoint_override = <None>
8406
8407
8408[quota]
8409#
8410# Quota options allow you to manage quotas in an OpenStack deployment.
8411
8412#
8413# From nova.conf
8414#
8415
8416#
8417# The number of instances allowed per project.
8418#
8419# Possible values:
8420#
8421# * A positive integer or 0.
8422# * -1 to disable the quota.
8423# (integer value)
8424# Minimum value: -1
8425# Deprecated group/name - [DEFAULT]/quota_instances
8426#instances = 10
8427
8428#
8429# The number of instance cores or vCPUs allowed per project.
8430#
8431# Possible values:
8432#
8433# * A positive integer or 0.
8434# * -1 to disable the quota.
8435# (integer value)
8436# Minimum value: -1
8437# Deprecated group/name - [DEFAULT]/quota_cores
8438#cores = 20
8439
8440#
8441# The number of megabytes of instance RAM allowed per project.
8442#
8443# Possible values:
8444#
8445# * A positive integer or 0.
8446# * -1 to disable the quota.
8447# (integer value)
8448# Minimum value: -1
8449# Deprecated group/name - [DEFAULT]/quota_ram
8450#ram = 51200
8451
8452# DEPRECATED:
8453# The number of floating IPs allowed per project.
8454#
8455# Floating IPs are not allocated to instances by default. Users need
8456# to select
8457# them from the pool configured by the OpenStack administrator to
8458# attach to their
8459# instances.
8460#
8461# Possible values:
8462#
8463# * A positive integer or 0.
8464# * -1 to disable the quota.
8465# (integer value)
8466# Minimum value: -1
8467# Deprecated group/name - [DEFAULT]/quota_floating_ips
8468# This option is deprecated for removal since 15.0.0.
8469# Its value may be silently ignored in the future.
8470# Reason:
8471# nova-network is deprecated, as are any related configuration
8472# options.
8473#floating_ips = 10
8474
8475# DEPRECATED:
8476# The number of fixed IPs allowed per project.
8477#
8478# Unlike floating IPs, fixed IPs are allocated dynamically by the
8479# network
8480# component when instances boot up. This quota value should be at
8481# least the
8482# number of instances allowed.
8483#
8484# Possible values:
8485#
8486# * A positive integer or 0.
8487# * -1 to disable the quota.
8488# (integer value)
8489# Minimum value: -1
8490# Deprecated group/name - [DEFAULT]/quota_fixed_ips
8491# This option is deprecated for removal since 15.0.0.
8492# Its value may be silently ignored in the future.
8493# Reason:
8494# nova-network is deprecated, as are any related configuration
8495# options.
8496#fixed_ips = -1
8497
8498#
8499# The number of metadata items allowed per instance.
8500#
8501# Users can associate metadata with an instance during instance
8502# creation. This
8503# metadata takes the form of key-value pairs.
8504#
8505# Possible values:
8506#
8507# * A positive integer or 0.
8508# * -1 to disable the quota.
8509# (integer value)
8510# Minimum value: -1
8511# Deprecated group/name - [DEFAULT]/quota_metadata_items
8512#metadata_items = 128
8513
8514#
8515# The number of injected files allowed.
8516#
8517# File injection allows users to customize the personality of an
8518# instance by
8519# injecting data into it upon boot. Only text file injection is
8520# permitted: binary
8521# or ZIP files are not accepted. During file injection, any existing
8522# files that
8523# match specified files are renamed to include ``.bak`` extension
8524# appended with a
8525# timestamp.
8526#
8527# Possible values:
8528#
8529# * A positive integer or 0.
8530# * -1 to disable the quota.
8531# (integer value)
8532# Minimum value: -1
8533# Deprecated group/name - [DEFAULT]/quota_injected_files
8534#injected_files = 5
8535
8536#
8537# The number of bytes allowed per injected file.
8538#
8539# Possible values:
8540#
8541# * A positive integer or 0.
8542# * -1 to disable the quota.
8543# (integer value)
8544# Minimum value: -1
8545# Deprecated group/name - [DEFAULT]/quota_injected_file_content_bytes
8546#injected_file_content_bytes = 10240
8547
8548#
8549# The maximum allowed injected file path length.
8550#
8551# Possible values:
8552#
8553# * A positive integer or 0.
8554# * -1 to disable the quota.
8555# (integer value)
8556# Minimum value: -1
8557# Deprecated group/name - [DEFAULT]/quota_injected_file_path_length
8558#injected_file_path_length = 255
8559
8560# DEPRECATED:
8561# The number of security groups per project.
8562#
8563# Possible values:
8564#
8565# * A positive integer or 0.
8566# * -1 to disable the quota.
8567# (integer value)
8568# Minimum value: -1
8569# Deprecated group/name - [DEFAULT]/quota_security_groups
8570# This option is deprecated for removal since 15.0.0.
8571# Its value may be silently ignored in the future.
8572# Reason:
8573# nova-network is deprecated, as are any related configuration
8574# options.
8575#security_groups = 10
8576
8577# DEPRECATED:
8578# The number of security rules per security group.
8579#
8580# The associated rules in each security group control the traffic to
8581# instances in
8582# the group.
8583#
8584# Possible values:
8585#
8586# * A positive integer or 0.
8587# * -1 to disable the quota.
8588# (integer value)
8589# Minimum value: -1
8590# Deprecated group/name - [DEFAULT]/quota_security_group_rules
8591# This option is deprecated for removal since 15.0.0.
8592# Its value may be silently ignored in the future.
8593# Reason:
8594# nova-network is deprecated, as are any related configuration
8595# options.
8596#security_group_rules = 20
8597
8598#
8599# The maximum number of key pairs allowed per user.
8600#
8601# Users can create at least one key pair for each project and use the
8602# key pair
8603# for multiple instances that belong to that project.
8604#
8605# Possible values:
8606#
8607# * A positive integer or 0.
8608# * -1 to disable the quota.
8609# (integer value)
8610# Minimum value: -1
8611# Deprecated group/name - [DEFAULT]/quota_key_pairs
8612#key_pairs = 100
8613
8614#
8615# The maximum number of server groups per project.
8616#
8617# Server groups are used to control the affinity and anti-affinity
8618# scheduling
8619# policy for a group of servers or instances. Reducing the quota will
8620# not affect
8621# any existing group, but new servers will not be allowed into groups
8622# that have
8623# become over quota.
8624#
8625# Possible values:
8626#
8627# * A positive integer or 0.
8628# * -1 to disable the quota.
8629# (integer value)
8630# Minimum value: -1
8631# Deprecated group/name - [DEFAULT]/quota_server_groups
8632#server_groups = 10
8633
8634#
8635# The maximum number of servers per server group.
8636#
8637# Possible values:
8638#
8639# * A positive integer or 0.
8640# * -1 to disable the quota.
8641# (integer value)
8642# Minimum value: -1
8643# Deprecated group/name - [DEFAULT]/quota_server_group_members
8644#server_group_members = 10
8645
8646#
8647# The number of seconds until a reservation expires.
8648#
8649# This quota represents the time period for invalidating quota
8650# reservations.
8651# (integer value)
8652#reservation_expire = 86400
8653
8654#
8655# The count of reservations until usage is refreshed.
8656#
8657# This defaults to 0 (off) to avoid additional load but it is useful
8658# to turn on
8659# to help keep quota usage up-to-date and reduce the impact of out of
8660# sync usage
8661# issues.
8662# (integer value)
8663# Minimum value: 0
8664#until_refresh = 0
8665
8666#
8667# The number of seconds between subsequent usage refreshes.
8668#
8669# This defaults to 0 (off) to avoid additional load but it is useful
8670# to turn on
8671# to help keep quota usage up-to-date and reduce the impact of out of
8672# sync usage
8673# issues. Note that quotas are not updated on a periodic task, they
8674# will update
8675# on a new reservation if max_age has passed since the last
8676# reservation.
8677# (integer value)
8678# Minimum value: 0
8679#max_age = 0
8680
8681# DEPRECATED:
8682# The quota enforcer driver.
8683#
8684# Provides abstraction for quota checks. Users can configure a
8685# specific
8686# driver to use for quota checks.
8687#
8688# Possible values:
8689#
8690# * nova.quota.DbQuotaDriver (default) or any string representing
8691# fully
8692# qualified class name.
8693# (string value)
8694# Deprecated group/name - [DEFAULT]/quota_driver
8695# This option is deprecated for removal since 14.0.0.
8696# Its value may be silently ignored in the future.
8697#driver = nova.quota.DbQuotaDriver
8698
8699#
8700# Recheck quota after resource creation to prevent allowing quota to
8701# be exceeded.
8702#
8703# This defaults to True (recheck quota after resource creation) but
8704# can be set to
8705# False to avoid additional load if allowing quota to be exceeded
8706# because of
8707# racing requests is considered acceptable. For example, when set to
8708# False, if a
8709# user makes highly parallel REST API requests to create servers, it
8710# will be
8711# possible for them to create more servers than their allowed quota
8712# during the
8713# race. If their quota is 10 servers, they might be able to create 50
8714# during the
8715# burst. After the burst, they will not be able to create any more
8716# servers but
8717# they will be able to keep their 50 servers until they delete them.
8718#
8719# The initial quota check is done before resources are created, so if
8720# multiple
8721# parallel requests arrive at the same time, all could pass the quota
8722# check and
8723# create resources, potentially exceeding quota. When recheck_quota is
8724# True,
8725# quota will be checked a second time after resources have been
8726# created and if
8727# the resource is over quota, it will be deleted and OverQuota will be
8728# raised,
8729# usually resulting in a 403 response to the REST API user. This makes
8730# it
8731# impossible for a user to exceed their quota with the caveat that it
8732# will,
8733# however, be possible for a REST API user to be rejected with a 403
8734# response in
8735# the event of a collision close to reaching their quota limit, even
8736# if the user
8737# has enough quota available when they made the request.
8738# (boolean value)
8739#recheck_quota = true
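#
# Example override (illustrative sketch only; the values below are
# placeholders rather than recommendations, and every option shown already
# exists in this [quota] section):
#
#metadata_items = 64
#injected_files = 3
#key_pairs = 50
#server_groups = 5
#server_group_members = 5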
8740
8741
8742[rdp]
8743#
8744# Options under this group enable and configure Remote Desktop
8745# Protocol (
8746# RDP) related features.
8747#
8748# This group is only relevant to Hyper-V users.
8749
8750#
8751# From nova.conf
8752#
8753
8754#
8755# Enable Remote Desktop Protocol (RDP) related features.
8756#
8757# Hyper-V, unlike the majority of the hypervisors employed on Nova
8758# compute
8759# nodes, uses RDP instead of VNC and SPICE as a desktop sharing
8760# protocol to
8761# provide instance console access. This option enables RDP for
8762# graphical
8763# console access for virtual machines created by Hyper-V.
8764#
8765# **Note:** RDP should only be enabled on compute nodes that support
8766# the Hyper-V
8767# virtualization platform.
8768#
8769# Related options:
8770#
8771# * ``compute_driver``: Must be hyperv.
8772#
8773# (boolean value)
8774#enabled = false
8775
8776#
8777# The URL an end user would use to connect to the RDP HTML5 console
8778# proxy.
8779# The console proxy service is called with this token-embedded URL and
8780# establishes the connection to the proper instance.
8781#
8782# An RDP HTML5 console proxy service will need to be configured to
8783# listen on the
8784# address configured here. Typically the console proxy service would
8785# be run on a
8786# controller node. The localhost address used as default would only
8787# work in a
8788# single-node environment, i.e. devstack.
8789#
8790# An RDP HTML5 proxy allows a user to access via the web the text or
8791# graphical
8792# console of any Windows server or workstation using RDP. RDP HTML5
8793# console
8794# proxy services include FreeRDP, wsgate.
8795# See https://github.com/FreeRDP/FreeRDP-WebConnect
8796#
8797# Possible values:
8798#
8799# * <scheme>://<ip-address>:<port-number>/
8800#
8801# The scheme must be identical to the scheme configured for the RDP
8802# HTML5
8803# console proxy service. It is ``http`` or ``https``.
8804#
8805# The IP address must be identical to the address on which the RDP
8806# HTML5
8807# console proxy service is listening.
8808#
8809# The port must be identical to the port on which the RDP HTML5
8810# console proxy
8811# service is listening.
8812#
8813# Related options:
8814#
8815# * ``rdp.enabled``: Must be set to ``True`` for
8816# ``html5_proxy_base_url`` to be
8817# effective.
8818# (uri value)
8819#html5_proxy_base_url = http://127.0.0.1:6083/
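#
# Example (illustrative sketch for a Hyper-V compute node; the hostname and
# port are placeholders for wherever the RDP HTML5 proxy, e.g.
# FreeRDP-WebConnect, is actually reachable):
#
#enabled = true
#html5_proxy_base_url = https://ctl01.example.local:8000/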
8820
8821
8822[remote_debug]
8823
8824#
8825# From nova.conf
8826#
8827
8828#
8829# Debug host (IP or name) to connect to. This command line parameter
8830# is used when
8831# you want to connect to a nova service via a debugger running on a
8832# different
8833# host.
8834#
8835# Note that using the remote debug option changes how Nova uses the
8836# eventlet
8837# library to support async IO. This could result in failures that do
8838# not occur
8839# under normal operation. Use at your own risk.
8840#
8841# Possible Values:
8842#
8843# * IP address of a remote host as a command line parameter
8844# to a nova service. For Example:
8845#
8846# /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf
8847# --remote_debug-host <IP address where the debugger is running>
8848# (unknown value)
8849#host = <None>
8850
8851#
8852# Debug port to connect to. This command line parameter allows you to
8853# specify
8854# the port you want to use to connect to a nova service via a debugger
8855# running
8856# on different host.
8857#
8858# Note that using the remote debug option changes how Nova uses the
8859# eventlet
8860# library to support async IO. This could result in failures that do
8861# not occur
8862# under normal operation. Use at your own risk.
8863#
8864# Possible Values:
8865#
8866# * Port number you want to use as a command line parameter
8867# to a nova service. For Example:
8868#
8869# /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf
8870# --remote_debug-host <IP address where the debugger is running>
8871# --remote_debug-port <port the debugger is listening on>.
8872# (port value)
8873# Minimum value: 0
8874# Maximum value: 65535
8875#port = <None>
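#
# Example (sketch; the address and port are placeholders for a workstation
# running a remote debug server, e.g. pydevd):
#
#host = 192.168.10.5
#port = 5678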
8876
8877
8878[scheduler]
8879
8880#
8881# From nova.conf
8882#
8883
8884#
8885# The scheduler host manager to use.
8886#
8887# The host manager manages the in-memory picture of the hosts that the
8888# scheduler
8889# uses. The option's values are chosen from the entry points under the
8890# namespace
8891# 'nova.scheduler.host_manager' in 'setup.cfg'.
8892#
8893# NOTE: The "ironic_host_manager" option is deprecated as of the
8894# 17.0.0 Queens
8895# release.
8896# (string value)
8897# Possible values:
8898# host_manager - <No description provided>
8899# ironic_host_manager - <No description provided>
8900# Deprecated group/name - [DEFAULT]/scheduler_host_manager
8901#host_manager = host_manager
8902
8903#
8904# The class of the driver used by the scheduler. This should be chosen
8905# from one
8906# of the entrypoints under the namespace 'nova.scheduler.driver' of
8907# file
8908# 'setup.cfg'. If nothing is specified in this option, the
8909# 'filter_scheduler' is
8910# used.
8911#
8912# Other options are:
8913#
8914# * 'caching_scheduler' which aggressively caches the system state for
8915# better
8916# individual scheduler performance at the risk of more retries when
8917# running
8918# multiple schedulers. [DEPRECATED]
8919# * 'chance_scheduler' which simply picks a host at random.
8920# [DEPRECATED]
8921# * 'fake_scheduler' which is used for testing.
8922#
8923# Possible values:
8924#
8925# * Any of the drivers included in Nova:
8926# ** filter_scheduler
8927# ** caching_scheduler
8928# ** chance_scheduler
8929# ** fake_scheduler
8930# * You may also set this to the entry point name of a custom
8931# scheduler driver,
8932# but you will be responsible for creating and maintaining it in
8933# your setup.cfg
8934# file.
8935# (string value)
8936# Deprecated group/name - [DEFAULT]/scheduler_driver
8937#driver = filter_scheduler
8938
8939#
8940# Periodic task interval.
8941#
8942# This value controls how often (in seconds) to run periodic tasks in
8943# the
8944# scheduler. The specific tasks that are run for each period are
8945# determined by
8946# the particular scheduler being used.
8947#
8948# If this is larger than the nova-service 'service_down_time' setting,
8949# Nova may
8950# report the scheduler service as down. This is because the scheduler
8951# driver is
8952# responsible for sending a heartbeat and it will only do that as
8953# often as this
8954# option allows. As each scheduler can work a little differently than
8955# the others,
8956# be sure to test this with your selected scheduler.
8957#
8958# Possible values:
8959#
8960# * An integer, where the integer corresponds to periodic task
8961# interval in
8962# seconds. 0 uses the default interval (60 seconds). A negative
8963# value disables
8964# periodic tasks.
8965#
8966# Related options:
8967#
8968# * ``nova-service service_down_time``
8969# (integer value)
8970# Deprecated group/name - [DEFAULT]/scheduler_driver_task_period
8971#periodic_task_interval = 60
8972
8973#
8974# This is the maximum number of attempts that will be made for a given
8975# instance
8976# build/move operation. It limits the number of alternate hosts
8977# returned by the
8978# scheduler. When that list of hosts is exhausted, a
8979# MaxRetriesExceeded
8980# exception is raised and the instance is set to an error state.
8981#
8982# Possible values:
8983#
8984# * A positive integer, where the integer corresponds to the max
8985# number of
8986# attempts that can be made when building or moving an instance.
8987# (integer value)
8988# Minimum value: 1
8989# Deprecated group/name - [DEFAULT]/scheduler_max_attempts
8990#max_attempts = 3
8991
8992#
8993# Periodic task interval.
8994#
8995# This value controls how often (in seconds) the scheduler should
8996# attempt
8997# to discover new hosts that have been added to cells. If negative
8998# (the
8999# default), no automatic discovery will occur.
9000#
9001# Deployments where compute nodes come and go frequently may want this
9002# enabled, where others may prefer to manually discover hosts when one
9003# is added to avoid any overhead from constantly checking. If enabled,
9004# every time this runs, we will select any unmapped hosts out of each
9005# cell database on every run.
9006# (integer value)
9007# Minimum value: -1
9008#discover_hosts_in_cells_interval = -1
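#
# Example (sketch): a deployment that adds compute nodes frequently might
# enable automatic discovery every five minutes:
#
#discover_hosts_in_cells_interval = 300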
9009
9010#
9011# This setting determines the maximum limit on results received from
9012# the
9013# placement service during a scheduling operation. It effectively
9014# limits
9015# the number of hosts that may be considered for scheduling requests
9016# that
9017# match a large number of candidates.
9018#
9019# A value of 1 (the minimum) will effectively defer scheduling to the
9020# placement
9021# service strictly on "will it fit" grounds. A higher value will put
9022# an upper
9023# cap on the number of results the scheduler will consider during the
9024# filtering
9025# and weighing process. Large deployments may need to set this lower
9026# than the
9027# total number of hosts available to limit memory consumption, network
9028# traffic,
9029# etc. of the scheduler.
9030#
9031# This option is only used by the FilterScheduler; if you use a
9032# different
9033# scheduler, this option has no effect.
9034# (integer value)
9035# Minimum value: 1
9036#max_placement_results = 1000
9037
9038
9039[serial_console]
9040#
9041# The serial console feature allows you to connect to a guest in case
9042# a
9043# graphical console like VNC, RDP or SPICE is not available. This is
9044# only
9045# currently supported for the libvirt, Ironic and hyper-v drivers.
9046
9047#
9048# From nova.conf
9049#
9050
9051#
9052# Enable the serial console feature.
9053#
9054# In order to use this feature, the service ``nova-serialproxy`` needs
9055# to run.
9056# This service is typically executed on the controller node.
9057# (boolean value)
9058#enabled = false
9059
9060#
9061# A range of TCP ports a guest can use for its backend.
9062#
9063# Each instance which gets created will use one port out of this
9064# range. If the
9065# range is not big enough to provide another port for a new instance,
9066# this
9067# instance won't get launched.
9068#
9069# Possible values:
9070#
9071# * Each string which passes the regex ``\d+:\d+`` For example
9072# ``10000:20000``.
9073# Be sure that the first port number is lower than the second port
9074# number
9075# and that both are in range from 0 to 65535.
9076# (string value)
9077#port_range = 10000:20000
9078
9079#
9080# The URL an end user would use to connect to the ``nova-serialproxy``
9081# service.
9082#
9083# The ``nova-serialproxy`` service is called with this token enriched
9084# URL
9085# and establishes the connection to the proper instance.
9086#
9087# Related options:
9088#
9089# * The IP address must be identical to the address to which the
9090# ``nova-serialproxy`` service is listening (see option
9091# ``serialproxy_host``
9092# in this section).
9093# * The port must be the same as in the option ``serialproxy_port`` of
9094# this
9095# section.
9096# * If you choose to use a secured websocket connection, then start
9097# this option
9098# with ``wss://`` instead of the unsecured ``ws://``. The options
9099# ``cert``
9100# and ``key`` in the ``[DEFAULT]`` section have to be set for that.
9101# (uri value)
9102#base_url = ws://127.0.0.1:6083/
9103
9104#
9105# The IP address to which proxy clients (like ``nova-serialproxy``)
9106# should
9107# connect to get the serial console of an instance.
9108#
9109# This is typically the IP address of the host of a ``nova-compute``
9110# service.
9111# (string value)
9112#proxyclient_address = 127.0.0.1
9113
9114#
9115# The IP address which is used by the ``nova-serialproxy`` service to
9116# listen
9117# for incoming requests.
9118#
9119# The ``nova-serialproxy`` service listens on this IP address for
9120# incoming
9121# connection requests to instances which expose serial console.
9122#
9123# Related options:
9124#
9125# * Ensure that this is the same IP address which is defined in the
9126# option
9127# ``base_url`` of this section or use ``0.0.0.0`` to listen on all
9128# addresses.
9129# (string value)
9130#serialproxy_host = 0.0.0.0
9131
9132#
9133# The port number which is used by the ``nova-serialproxy`` service to
9134# listen
9135# for incoming requests.
9136#
9137# The ``nova-serialproxy`` service listens on this port number for
9138# incoming
9139# connection requests to instances which expose serial console.
9140#
9141# Related options:
9142#
9143# * Ensure that this is the same port number which is defined in the
9144# option
9145# ``base_url`` of this section.
9146# (port value)
9147# Minimum value: 0
9148# Maximum value: 65535
9149#serialproxy_port = 6083
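#
# Example (illustrative sketch; the hostname and addresses are placeholders
# for a deployment where ``nova-serialproxy`` runs on a controller and this
# compute node is reachable at 10.1.0.21):
#
#enabled = true
#base_url = ws://ctl01.example.local:6083/
#proxyclient_address = 10.1.0.21
#serialproxy_host = 0.0.0.0
#serialproxy_port = 6083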
9150
9151
9152[service_user]
9153#
9154# Configuration options for service to service authentication using a
9155# service
9156# token. These options allow sending a service token along with the
9157# user's token
9158# when contacting external REST APIs.
9159
9160#
9161# From nova.conf
9162#
9163
9164#
9165# When True, if sending a user token to a REST API, also send a
9166# service token.
9167#
9168# Nova often reuses the user token provided to the nova-api to talk to
9169# other REST
9170# APIs, such as Cinder, Glance and Neutron. It is possible that while
9171# the user
9172# token was valid when the request was made to Nova, the token may
9173# expire before
9174# it reaches the other service. To avoid any failures, and to make it
9175# clear it is
9176# Nova calling the service on the user's behalf, we include a service
9177# token along
9178# with the user token. Should the user's token have expired, a valid
9179# service
9180# token ensures the REST API request will still be accepted by the
9181# keystone
9182# middleware.
9183# (boolean value)
9184#send_service_user_token = false
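#
# Example (sketch; the Keystone URL and credentials are placeholders,
# assuming a 'nova' service user authenticated with the 'password' plugin;
# the related options are listed further down in this section):
#
#send_service_user_token = true
#auth_type = password
#auth_url = http://keystone.example.local:5000/v3
#username = nova
#password = <nova service password>
#user_domain_name = Default
#project_name = service
#project_domain_name = Default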
9185
9186# PEM encoded Certificate Authority to use when verifying HTTPs
9187# connections. (string value)
9188#cafile = <None>
9189
9190# PEM encoded client certificate cert file (string value)
9191#certfile = <None>
9192
9193# PEM encoded client certificate key file (string value)
9194#keyfile = <None>
9195
9196# Verify HTTPS connections. (boolean value)
9197#insecure = false
9198
9199# Timeout value for http requests (integer value)
9200#timeout = <None>
9201
9202# Authentication type to load (string value)
9203# Deprecated group/name - [service_user]/auth_plugin
9204#auth_type = <None>
9205
9206# Config Section from which to load plugin specific options (string
9207# value)
9208#auth_section = <None>
9209
9210# Authentication URL (string value)
9211#auth_url = <None>
9212
9213# Scope for system operations (string value)
9214#system_scope = <None>
9215
9216# Domain ID to scope to (string value)
9217#domain_id = <None>
9218
9219# Domain name to scope to (string value)
9220#domain_name = <None>
9221
9222# Project ID to scope to (string value)
9223#project_id = <None>
9224
9225# Project name to scope to (string value)
9226#project_name = <None>
9227
9228# Domain ID containing project (string value)
9229#project_domain_id = <None>
9230
9231# Domain name containing project (string value)
9232#project_domain_name = <None>
9233
9234# Trust ID (string value)
9235#trust_id = <None>
9236
9237# Optional domain ID to use with v3 and v2 parameters. It will be used
9238# for both the user and project domain in v3 and ignored in v2
9239# authentication. (string value)
9240#default_domain_id = <None>
9241
9242# Optional domain name to use with v3 API and v2 parameters. It will
9243# be used for both the user and project domain in v3 and ignored in v2
9244# authentication. (string value)
9245#default_domain_name = <None>
9246
9247# User ID (string value)
9248#user_id = <None>
9249
9250# Username (string value)
9251# Deprecated group/name - [service_user]/user_name
9252#username = <None>
9253
9254# User's domain id (string value)
9255#user_domain_id = <None>
9256
9257# User's domain name (string value)
9258#user_domain_name = <None>
9259
9260# User's password (string value)
9261#password = <None>
9262
9263# Tenant ID (string value)
9264#tenant_id = <None>
9265
9266# Tenant Name (string value)
9267#tenant_name = <None>
9268
9269
9270[spice]
9271#
9272# The SPICE console feature allows you to connect to a guest virtual
9273# machine. SPICE is a replacement for the fairly limited VNC protocol.
9274#
9275# The following requirements must be met in order to use SPICE:
9277#
9278# * Virtualization driver must be libvirt
9279# * spice.enabled set to True
9280# * vnc.enabled set to False
9281# * update html5proxy_base_url
9282# * update server_proxyclient_address
9283
9284#
9285# From nova.conf
9286#
9287
9288#
9289# Enable SPICE related features.
9290#
9291# Related options:
9292#
9293# * VNC must be explicitly disabled to get access to the SPICE
9294# console. Set the
9295# enabled option to False in the [vnc] section to disable the VNC
9296# console.
9297# (boolean value)
9298#enabled = false
9299enabled = false
9300#
9301# Enable the SPICE guest agent support on the instances.
9302#
9303# The Spice agent works with the Spice protocol to offer a better
9304# guest console
9305# experience. However, the Spice console can still be used without the
9306# Spice
9307# Agent. With the Spice agent installed the following features are
9308# enabled:
9309#
9310# * Copy & Paste of text and images between the guest and client
9311# machine
9312# * Automatic adjustment of resolution when the client screen changes
9313# - e.g.
9314# if you make the Spice console full screen the guest resolution
9315# will adjust to
9316# match it rather than letterboxing.
9317# * Better mouse integration - The mouse can be captured and released
9318# without
9319# needing to click inside the console or press keys to release it.
9320# The
9321# performance of mouse movement is also improved.
9322# (boolean value)
9323#agent_enabled = true
9324
9325#
9326# Location of the SPICE HTML5 console proxy.
9327#
9328# The end user would use this URL to connect to the
9329# ``nova-spicehtml5proxy`` service. This service will forward the
9330# request to the console of an
9331# instance.
9332#
9333# In order to use SPICE console, the service ``nova-spicehtml5proxy``
9334# should be
9335# running. This service is typically launched on the controller node.
9336#
9337# Possible values:
9338#
9339# * Must be a valid URL of the form:
9340# ``http://host:port/spice_auto.html``
9341# where host is the node running ``nova-spicehtml5proxy`` and the
9342# port is
9343# typically 6082. Consider not using the default value, as it is not well
9344# defined
9345# for any real deployment.
9346#
9347# Related options:
9348#
9349# * This option depends on ``html5proxy_host`` and ``html5proxy_port``
9350# options.
9351# The access URL returned by the compute node must have the host
9352# and port where the ``nova-spicehtml5proxy`` service is listening.
9353# (uri value)
9354#html5proxy_base_url = http://127.0.0.1:6082/spice_auto.html
9355{%- if compute.vncproxy_url is defined %}
9356html5proxy_base_url = {{ compute.vncproxy_url }}/spice_auto.html
9357{%- endif %}
9358
9359#
9360# The address where the SPICE server running on the instances should
9361# listen.
9362#
9363# Typically, the ``nova-spicehtml5proxy`` proxy client runs on the
9364# controller
9365# node and connects over the private network to this address on the
9366# compute
9367# node(s).
9368#
9369# Possible values:
9370#
9371# * IP address to listen on.
9372# (string value)
9373#server_listen = 127.0.0.1
9374
9375#
9376# The address used by ``nova-spicehtml5proxy`` client to connect to
9377# instance
9378# console.
9379#
9380# Typically, the ``nova-spicehtml5proxy`` proxy client runs on the
9381# controller node and connects over the private network to this
9382# address on the
9383# compute node(s).
9384#
9385# Possible values:
9386#
9387# * Any valid IP address on the compute node.
9388#
9389# Related options:
9390#
9391# * This option depends on the ``server_listen`` option.
9392# The proxy client must be able to access the address specified in
9393# ``server_listen`` using the value of this option.
9394# (string value)
9395#server_proxyclient_address = 127.0.0.1
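#
# Example (sketch; the address is a placeholder for this compute node's IP on
# the network the controller uses to reach it):
#
#server_listen = 10.1.0.21
#server_proxyclient_address = 10.1.0.21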
9396
9397#
9398# A keyboard layout which is supported by the underlying hypervisor on
9399# this
9400# node.
9401#
9402# Possible values:
9403# * This is usually an 'IETF language tag' (default is 'en-us'). If
9404# you
9405# use QEMU as hypervisor, you should find the list of supported
9406# keyboard
9407# layouts at /usr/share/qemu/keymaps.
9408# (string value)
9409#keymap = en-us
9410
9411#
9412# IP address or a hostname on which the ``nova-spicehtml5proxy``
9413# service
9414# listens for incoming requests.
9415#
9416# Related options:
9417#
9418# * This option depends on the ``html5proxy_base_url`` option.
9419# The ``nova-spicehtml5proxy`` service must be listening on a host
9420# that is
9421# accessible from the HTML5 client.
9422# (unknown value)
9423#html5proxy_host = 0.0.0.0
9424
9425#
9426# Port on which the ``nova-spicehtml5proxy`` service listens for
9427# incoming
9428# requests.
9429#
9430# Related options:
9431#
9432# * This option depends on the ``html5proxy_base_url`` option.
9433# The ``nova-spicehtml5proxy`` service must be listening on a port
9434# that is
9435# accessible from the HTML5 client.
9436# (port value)
9437# Minimum value: 0
9438# Maximum value: 65535
9439#html5proxy_port = 6082
9440
9441
9442[upgrade_levels]
9443
9444{%- if compute.upgrade_levels is defined %}
9445{%- for key, value in compute.upgrade_levels.items() %}
9446{{ key }}={{ value }}
9447{%- endfor %}
9448{%- endif %}
9449#
9450# upgrade_levels options are used to set version cap for RPC
9451# messages sent between different nova services.
9452#
9453# By default all services send messages using the latest version
9454# they know about.
9455#
9456# The compute upgrade level is an important part of rolling upgrades
9457# where old and new nova-compute services run side by side.
9458#
9459# The other options can largely be ignored, and are only kept to
9460# help with a possible future backport issue.
9461
9462#
9463# From nova.conf
9464#
9465
9466#
9467# Compute RPC API version cap.
9468#
9469# By default, we always send messages using the most recent version
9470# the client knows about.
9471#
9472# Where you have old and new compute services running, you should set
9473# this to the lowest deployed version. This is to guarantee that all
9474# services never send messages that one of the compute nodes can't
9475# understand. Note that we only support upgrading from release N to
9476# release N+1.
9477#
9478# Set this option to "auto" if you want to let the compute RPC module
9479# automatically determine what version to use based on the service
9480# versions in the deployment.
9481#
9482# Possible values:
9483#
9484# * By default send the latest version the client knows about
9485# * 'auto': Automatically determines what version to use based on
9486# the service versions in the deployment.
9487# * A string representing a version number in the format 'N.N';
9488# for example, possible values might be '1.12' or '2.0'.
9489# * An OpenStack release name, in lower case, such as 'mitaka' or
9490# 'liberty'.
9491# (string value)
9492#compute = <None>
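#
# Example (sketch): during a rolling upgrade the cap can be pinned to the
# older release name, or negotiated automatically:
#
#compute = auto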
9493
9494# Cells RPC API version cap (string value)
9495#cells = <None>
9496
9497# Intercell RPC API version cap (string value)
9498#intercell = <None>
9499
9500# Cert RPC API version cap (string value)
9501#cert = <None>
9502
9503# Scheduler RPC API version cap (string value)
9504#scheduler = <None>
9505
9506# Conductor RPC API version cap (string value)
9507#conductor = <None>
9508
9509# Console RPC API version cap (string value)
9510#console = <None>
9511
9512# Consoleauth RPC API version cap (string value)
9513#consoleauth = <None>
9514
9515# Network RPC API version cap (string value)
9516#network = <None>
9517
9518# Base API RPC API version cap (string value)
9519#baseapi = <None>
9520
9521
9522[vault]
9523
9524#
9525# From nova.conf
9526#
9527
9528# root token for vault (string value)
9529#root_token_id = <None>
9530
9531# Use this endpoint to connect to Vault, for example:
9532# "http://127.0.0.1:8200" (string value)
9533#vault_url = http://127.0.0.1:8200
9534
9535# Absolute path to ca cert file (string value)
9536#ssl_ca_crt_file = <None>
9537
9538# SSL Enabled/Disabled (boolean value)
9539#use_ssl = false
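#
# Example (sketch; the URL and CA bundle path are placeholders for a Vault
# server reached over TLS):
#
#use_ssl = true
#ssl_ca_crt_file = /etc/ssl/certs/vault-ca.pem
#vault_url = https://vault.example.local:8200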
9540
9541
9542[vendordata_dynamic_auth]
9543#
9544# Options within this group control the authentication of the
9545# vendordata
9546# subsystem of the metadata API server (and config drive) with
9547# external systems.
9548
9549#
9550# From nova.conf
9551#
9552
9553# PEM encoded Certificate Authority to use when verifying HTTPs
9554# connections. (string value)
9555#cafile = <None>
9556
9557# PEM encoded client certificate cert file (string value)
9558#certfile = <None>
9559
9560# PEM encoded client certificate key file (string value)
9561#keyfile = <None>
9562
9563# Verify HTTPS connections. (boolean value)
9564#insecure = false
9565
9566# Timeout value for http requests (integer value)
9567#timeout = <None>
9568
9569# Authentication type to load (string value)
9570# Deprecated group/name - [vendordata_dynamic_auth]/auth_plugin
9571#auth_type = <None>
9572
9573# Config Section from which to load plugin specific options (string
9574# value)
9575#auth_section = <None>
9576
9577# Authentication URL (string value)
9578#auth_url = <None>
9579
9580# Scope for system operations (string value)
9581#system_scope = <None>
9582
9583# Domain ID to scope to (string value)
9584#domain_id = <None>
9585
9586# Domain name to scope to (string value)
9587#domain_name = <None>
9588
9589# Project ID to scope to (string value)
9590#project_id = <None>
9591
9592# Project name to scope to (string value)
9593#project_name = <None>
9594
9595# Domain ID containing project (string value)
9596#project_domain_id = <None>
9597
9598# Domain name containing project (string value)
9599#project_domain_name = <None>
9600
9601# Trust ID (string value)
9602#trust_id = <None>
9603
9604# Optional domain ID to use with v3 and v2 parameters. It will be used
9605# for both the user and project domain in v3 and ignored in v2
9606# authentication. (string value)
9607#default_domain_id = <None>
9608
9609# Optional domain name to use with v3 API and v2 parameters. It will
9610# be used for both the user and project domain in v3 and ignored in v2
9611# authentication. (string value)
9612#default_domain_name = <None>
9613
9614# User ID (string value)
9615#user_id = <None>
9616
9617# Username (string value)
9618# Deprecated group/name - [vendordata_dynamic_auth]/user_name
9619#username = <None>
9620
9621# User's domain id (string value)
9622#user_domain_id = <None>
9623
9624# User's domain name (string value)
9625#user_domain_name = <None>
9626
9627# User's password (string value)
9628#password = <None>
9629
9630# Tenant ID (string value)
9631#tenant_id = <None>
9632
9633# Tenant Name (string value)
9634#tenant_name = <None>
9635
9636{%- set compute_driver = compute.get('compute_driver', 'libvirt.LibvirtDriver') %}
9637{%- if compute_driver in compute_driver_mapping.keys() %}
9638{%- set _data = compute.get(compute_driver_mapping[compute_driver]) %}
9639{%- include "nova/files/queens/compute/_" + compute_driver_mapping[compute_driver] + ".conf" %}
9640{%- endif %}
9641
9642[vnc]
9643#
9644# Virtual Network Computer (VNC) can be used to provide remote desktop
9645# console access to instances for tenants and/or administrators.
9646
9647#
9648# From nova.conf
9649#
9650
9651#
9652# Enable VNC related features.
9653#
9654# Guests will get created with graphical devices to support this.
9655# Clients
9656# (for example Horizon) can then establish a VNC connection to the
9657# guest.
9658# (boolean value)
9659# Deprecated group/name - [DEFAULT]/vnc_enabled
9660enabled = true
9661
9662{%- if compute.vncproxy_url is defined %}
9663novncproxy_base_url={{ compute.vncproxy_url }}/vnc_auto.html
9664{%- endif %}
9665{%- if compute.bind is defined and compute.bind.vnc_port is defined %}
9666novncproxy_port={{ compute.bind.vnc_port }}
9667{%- endif %}
9668{%- if compute.bind is defined %}
9669{%- if compute.bind.vnc_address is defined %}
9670vncserver_listen={{ compute.bind.vnc_address }}
9671vncserver_proxyclient_address={{ compute.bind.vnc_address }}
9672{%- else %}
9673vncserver_listen=0.0.0.0
9674{%- endif %}
9675{%- endif %}
9676
9677#
9678# Keymap for VNC.
9679#
9680# The keyboard mapping (keymap) determines which keyboard layout a VNC
9681# session should use by default.
9682#
9683# Possible values:
9684#
9685# * A keyboard layout which is supported by the underlying hypervisor
9686# on
9687# this node. This is usually an 'IETF language tag' (for example
9688# 'en-us'). If you use QEMU as hypervisor, you should find the
9689# list
9690# of supported keyboard layouts at ``/usr/share/qemu/keymaps``.
9691# (string value)
9692# Deprecated group/name - [DEFAULT]/vnc_keymap
9693keymap = {{ compute.get('vnc_keymap', 'en-us') }}
9694
9695#
9696# The IP address or hostname on which an instance should listen to for
9697# incoming VNC connection requests on this node.
9698# (unknown value)
9699# Deprecated group/name - [DEFAULT]/vncserver_listen
9700# Deprecated group/name - [vnc]/vncserver_listen
9701#server_listen = 127.0.0.1
9702
9703#
9704# Private, internal IP address or hostname of VNC console proxy.
9705#
9706# The VNC proxy is an OpenStack component that enables compute service
9707# users to access their instances through VNC clients.
9708#
9709# This option sets the private address to which proxy clients, such as
9710# ``nova-xvpvncproxy``, should connect.
9711# (unknown value)
9712# Deprecated group/name - [DEFAULT]/vncserver_proxyclient_address
9713# Deprecated group/name - [vnc]/vncserver_proxyclient_address
9714#server_proxyclient_address = 127.0.0.1
9715
9716#
9717# Public address of noVNC VNC console proxy.
9718#
9719# The VNC proxy is an OpenStack component that enables compute service
9720# users to access their instances through VNC clients. noVNC provides
9721# VNC support through a websocket-based client.
9722#
9723# This option sets the public base URL to which client systems will
9724# connect. noVNC clients can use this address to connect to the noVNC
9725# instance and, by extension, the VNC sessions.
9726#
9727# Related options:
9728#
9729# * novncproxy_host
9730# * novncproxy_port
9731# (uri value)
9732#novncproxy_base_url = http://127.0.0.1:6080/vnc_auto.html
9733
9734#
9735# IP address or hostname that the XVP VNC console proxy should bind
9736# to.
9737#
9738# The VNC proxy is an OpenStack component that enables compute service
9739# users to access their instances through VNC clients. Xen provides
9740# the Xenserver VNC Proxy, or XVP, as an alternative to the
9741# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
9742# XVP clients are Java-based.
9743#
9744# This option sets the private address to which the XVP VNC console
9745# proxy
9746# service should bind.
9747#
9748# Related options:
9749#
9750# * xvpvncproxy_port
9751# * xvpvncproxy_base_url
9752# (unknown value)
9753#xvpvncproxy_host = 0.0.0.0
9754
9755#
9756# Port that the XVP VNC console proxy should bind to.
9757#
9758# The VNC proxy is an OpenStack component that enables compute service
9759# users to access their instances through VNC clients. Xen provides
9760# the Xenserver VNC Proxy, or XVP, as an alternative to the
9761# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
9762# XVP clients are Java-based.
9763#
9764# This option sets the private port to which the XVP VNC console proxy
9765# service should bind.
9766#
9767# Related options:
9768#
9769# * xvpvncproxy_host
9770# * xvpvncproxy_base_url
9771# (port value)
9772# Minimum value: 0
9773# Maximum value: 65535
9774#xvpvncproxy_port = 6081
9775
9776#
9777# Public URL address of XVP VNC console proxy.
9778#
9779# The VNC proxy is an OpenStack component that enables compute service
9780# users to access their instances through VNC clients. Xen provides
9781# the Xenserver VNC Proxy, or XVP, as an alternative to the
9782# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
9783# XVP clients are Java-based.
9784#
9785# This option sets the public base URL to which client systems will
9786# connect. XVP clients can use this address to connect to the XVP
9787# instance and, by extension, the VNC sessions.
9788#
9789# Related options:
9790#
9791# * xvpvncproxy_host
9792# * xvpvncproxy_port
9793# (uri value)
9794#xvpvncproxy_base_url = http://127.0.0.1:6081/console
9795
9796#
9797# IP address that the noVNC console proxy should bind to.
9798#
9799# The VNC proxy is an OpenStack component that enables compute service
9800# users to access their instances through VNC clients. noVNC provides
9801# VNC support through a websocket-based client.
9802#
9803# This option sets the private address to which the noVNC console
9804# proxy
9805# service should bind.
9806#
9807# Related options:
9808#
9809# * novncproxy_port
9810# * novncproxy_base_url
9811# (string value)
9812#novncproxy_host = 0.0.0.0
9813
9814#
9815# Port that the noVNC console proxy should bind to.
9816#
9817# The VNC proxy is an OpenStack component that enables compute service
9818# users to access their instances through VNC clients. noVNC provides
9819# VNC support through a websocket-based client.
9820#
9821# This option sets the private port to which the noVNC console proxy
9822# service should bind.
9823#
9824# Related options:
9825#
9826# * novncproxy_host
9827# * novncproxy_base_url
9828# (port value)
9829# Minimum value: 0
9830# Maximum value: 65535
9831#novncproxy_port = 6080
9832
9833#
9834# The authentication schemes to use with the compute node.
9835#
9836# Control what RFB authentication schemes are permitted for
9837# connections between
9838# the proxy and the compute host. If multiple schemes are enabled, the
9839# first
9840# matching scheme will be used, thus the strongest schemes should be
9841# listed
9842# first.
9843#
9844# Possible values:
9845#
9846# * ``none``: allow connection without authentication
9847# * ``vencrypt``: use VeNCrypt authentication scheme
9848#
9849# Related options:
9850#
9851# * ``[vnc]vencrypt_client_key``, ``[vnc]vencrypt_client_cert``: must
9852# also be set
9853# (list value)
9854#auth_schemes = none
9855
9856# The path to the client key PEM file (for x509)
9857#
9858# The fully qualified path to a PEM file containing the private key
9859# which the VNC
9860# proxy server presents to the compute node during VNC authentication.
9861#
9862# Related options:
9863#
9864# * ``vnc.auth_schemes``: must include ``vencrypt``
9865# * ``vnc.vencrypt_client_cert``: must also be set
9866# (string value)
9867#vencrypt_client_key = <None>
9868
9869# The path to the client certificate PEM file (for x509)
9870#
9871# The fully qualified path to a PEM file containing the x509
9872# certificate which
9873# the VNC proxy server presents to the compute node during VNC
9874# authentication.
9875#
9876# Related options:
9877#
9878# * ``vnc.auth_schemes``: must include ``vencrypt``
9879# * ``vnc.vencrypt_client_key``: must also be set
9880# (string value)
9881#vencrypt_client_cert = <None>
9882
9883# The path to the CA certificate PEM file
9884#
9885# The fully qualified path to a PEM file containing one or more x509
9886# certificates
9887# for the certificate authorities used by the compute node VNC server.
9888#
9889# Related options:
9890#
9891# * ``vnc.auth_schemes``: must include ``vencrypt``
9892# (string value)
9893#vencrypt_ca_certs = <None>
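#
# Example (sketch; the PEM paths are placeholders, assuming the compute
# hosts' VNC servers support VeNCrypt and plain RFB is kept as a fallback):
#
#auth_schemes = vencrypt,none
#vencrypt_client_key = /etc/pki/nova-novncproxy/client-key.pem
#vencrypt_client_cert = /etc/pki/nova-novncproxy/client-cert.pem
#vencrypt_ca_certs = /etc/pki/nova-novncproxy/ca-certs.pem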
9894
9895
9896[workarounds]
9897#
9898# A collection of workarounds used to mitigate bugs or issues found in
9899# system
9900# tools (e.g. Libvirt or QEMU) or Nova itself under certain
9901# conditions. These
9902# should only be enabled in exceptional circumstances. All options are
9903# linked
9904# against bug IDs, where more information on the issue can be found.
9905
9906#
9907# From nova.conf
9908#
9909
9910#
9911# Use sudo instead of rootwrap.
9912#
9913# Allow fallback to sudo for performance reasons.
9914#
9915# For more information, refer to the bug report:
9916#
9917# https://bugs.launchpad.net/nova/+bug/1415106
9918#
9919# Possible values:
9920#
9921# * True: Use sudo instead of rootwrap
9922# * False: Use rootwrap as usual
9923#
9924# Interdependencies to other options:
9925#
9926# * Any options that affect 'rootwrap' will be ignored.
9927# (boolean value)
9928#disable_rootwrap = false
9929
9930#
9931# Disable live snapshots when using the libvirt driver.
9932#
9933# Live snapshots allow the snapshot of the disk to happen without an
9934# interruption to the guest, using coordination with a guest agent to
9935# quiesce the filesystem.
9936#
9937# When using libvirt 1.2.2 live snapshots fail intermittently under
9938# load
9939# (likely related to concurrent libvirt/qemu operations). This config
9940# option provides a mechanism to disable live snapshot, in favor of
9941# cold
9942# snapshot, while this is resolved. Cold snapshot causes an instance
9943# outage while the guest is going through the snapshotting process.
9944#
9945# For more information, refer to the bug report:
9946#
9947# https://bugs.launchpad.net/nova/+bug/1334398
9948#
9949# Possible values:
9950#
9951# * True: Live snapshot is disabled when using libvirt
9952# * False: Live snapshots are always used when snapshotting (as long
9953# as
9954# there is a new enough libvirt and the backend storage supports it)
9955# (boolean value)
9956#disable_libvirt_livesnapshot = false
9957disable_libvirt_livesnapshot = {{ compute.get('workaround', {}).get('disable_libvirt_livesnapshot', True)|lower }}
9958
9959#
9960# Enable handling of events emitted from compute drivers.
9961#
9962# Many compute drivers emit lifecycle events, which are events that
9963# occur when,
9964# for example, an instance is starting or stopping. If the instance is
9965# going
9966# through task state changes due to an API operation, like resize, the
9967# events
9968# are ignored.
9969#
9970# This is an advanced feature which allows the hypervisor to signal to
9971# the
9972# compute service that an unexpected state change has occurred in an
9973# instance
9974# and that the instance can be shutdown automatically. Unfortunately,
9975# this can
9976# race in some conditions, for example in reboot operations or when
9977# the compute
9978# service or the host is rebooted (planned or due to an outage). If
9979# such races
9980# are common, then it is advisable to disable this feature.
9981#
9982# Care should be taken when this feature is disabled and
9983# 'sync_power_state_interval' is set to a negative value. In this
9984# case, any
9985# instances that get out of sync between the hypervisor and the Nova
9986# database
9987# will have to be synchronized manually.
9988#
9989# For more information, refer to the bug report:
9990#
9991# https://bugs.launchpad.net/bugs/1444630
9992#
9993# Interdependencies to other options:
9994#
9995# * If ``sync_power_state_interval`` is negative and this feature is
9996# disabled,
9997# then instances that get out of sync between the hypervisor and the
9998# Nova
9999# database will have to be synchronized manually.
10000# (boolean value)
10001#handle_virt_lifecycle_events = true
10002
10003#
10004# Disable the server group policy check upcall in compute.
10005#
10006# In order to detect races with server group affinity policy, the
10007# compute
10008# service attempts to validate that the policy was not violated by the
10009# scheduler. It does this by making an upcall to the API database to
10010# list
10011# the instances in the server group for one that it is booting, which
10012# violates
10013# our api/cell isolation goals. Eventually this will be solved by
10014# proper affinity
10015# guarantees in the scheduler and placement service, but until then,
10016# this late
10017# check is needed to ensure proper affinity policy.
10018#
10019# Operators that desire api/cell isolation over this check should
10020# enable this flag, which will avoid making that upcall from compute.
10021#
10022# Related options:
10023#
10024# * [filter_scheduler]/track_instance_changes also relies on upcalls
10025# from the
10026# compute service to the scheduler service.
10027# (boolean value)
10028#disable_group_policy_check_upcall = false
10029
10030
10031[wsgi]
10032#
10033# Options under this group are used to configure WSGI (Web Server
10034# Gateway
10035# Interface). WSGI is used to serve API requests.
10036
10037#
10038# From nova.conf
10039#
10040
10041#
10042# This option represents a file name for the paste.deploy config for
10043# nova-api.
10044#
10045# Possible values:
10046#
10047# * A string representing file name for the paste.deploy config.
10048# (string value)
10049api_paste_config = /etc/nova/api-paste.ini
10050
10051# DEPRECATED:
10052# It represents a python format string that is used as the template to
10053# generate
10054# log lines. The following values can be formatted into it: client_ip,
10055# date_time, request_line, status_code, body_length, wall_seconds.
10056#
10057# This option is used for building custom request loglines when
10058# running
10059# nova-api under eventlet. If used under uwsgi or apache, this option
10060# has no effect.
10061#
10062# Possible values:
10063#
10064# * '%(client_ip)s "%(request_line)s" status: %(status_code)s'
10065# 'len: %(body_length)s time: %(wall_seconds).7f' (default)
10066# * Any formatted string formed by specific values.
10067# (string value)
10068# This option is deprecated for removal since 16.0.0.
10069# Its value may be silently ignored in the future.
10070# Reason:
10071# This option only works when running nova-api under eventlet, and
10072# encodes very eventlet specific pieces of information. Starting in
10073# Pike
10074# the preferred model for running nova-api is under uwsgi or apache
10075# mod_wsgi.
10076#wsgi_log_format = %(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f
10077
10078#
10079# This option specifies the HTTP header used to determine the protocol
10080# scheme
10081# for the original request, even if it was removed by an SSL
10082# terminating proxy.
10083#
10084# Possible values:
10085#
10086# * None (default) - the request scheme is not influenced by any HTTP
10087# headers
10088# * Valid HTTP header, like HTTP_X_FORWARDED_PROTO
10089#
10090# WARNING: Do not set this unless you know what you are doing.
10091#
10092# Make sure ALL of the following are true before setting this
10093# (assuming the
10094# values from the example above):
10095# * Your API is behind a proxy.
10096# * Your proxy strips the X-Forwarded-Proto header from all incoming
10097# requests.
10098# In other words, if end users include that header in their
10099# requests, the proxy
10100# will discard it.
10101# * Your proxy sets the X-Forwarded-Proto header and sends it to API,
10102# but only
10103# for requests that originally come in via HTTPS.
10104#
10105# If any of those are not true, you should keep this setting set to
10106# None.
10107#
10108# (string value)
10109#secure_proxy_ssl_header = <None>
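#
# Example (sketch; only valid when all of the conditions above hold, i.e. the
# API sits behind a TLS-terminating proxy that sets this header itself):
#
#secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO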
10110
10111#
10112# This option allows setting path to the CA certificate file that
10113# should be used
10114# to verify connecting clients.
10115#
10116# Possible values:
10117#
10118# * String representing path to the CA certificate file.
10119#
10120# Related options:
10121#
10122# * enabled_ssl_apis
10123# (string value)
10124#ssl_ca_file = <None>
10125
10126#
10127# This option allows setting path to the SSL certificate of API
10128# server.
10129#
10130# Possible values:
10131#
10132# * String representing path to the SSL certificate.
10133#
10134# Related options:
10135#
10136# * enabled_ssl_apis
10137# (string value)
10138#ssl_cert_file = <None>
10139
10140#
10141# This option specifies the path to the file where SSL private key of
10142# API
10143# server is stored when SSL is in effect.
10144#
10145# Possible values:
10146#
10147# * String representing path to the SSL private key.
10148#
10149# Related options:
10150#
10151# * enabled_ssl_apis
10152# (string value)
10153#ssl_key_file = <None>
10154
10155#
10156# This option sets the value of TCP_KEEPIDLE in seconds for each
10157# server socket.
10158# It specifies the duration of time to keep connection active. TCP
10159# generates a
10160# KEEPALIVE transmission for an application that requests to keep
10161# connection
10162# active. Not supported on OS X.
10163#
10164# Related options:
10165#
10166# * keep_alive
10167# (integer value)
10168# Minimum value: 0
10169#tcp_keepidle = 600
10170
10171#
10172# This option specifies the size of the pool of greenthreads used by
10173# wsgi.
10174# It is possible to limit the number of concurrent connections using
10175# this
10176# option.
10177# (integer value)
10178# Minimum value: 0
10179# Deprecated group/name - [DEFAULT]/wsgi_default_pool_size
10180#default_pool_size = 1000
10181
10182#
10183# This option specifies the maximum line size of message headers to be
10184# accepted.
10185# max_header_line may need to be increased when using large tokens
10186# (typically
10187# those generated by the Keystone v3 API with big service catalogs).
10188#
10189# Since TCP is a stream based protocol, in order to reuse a
10190# connection, the HTTP
10191# has to have a way to indicate the end of the previous response and
10192# beginning
10193# of the next. Hence, in a keep_alive case, all messages must have a
10194# self-defined message length.
10195# (integer value)
10196# Minimum value: 0
10197#max_header_line = 16384
10198
10199#
10200# This option allows using the same TCP connection to send and receive
10201# multiple
10202# HTTP requests/responses, as opposed to opening a new one for every
10203# single
10204# request/response pair. HTTP keep-alive indicates HTTP connection
10205# reuse.
10206#
10207# Possible values:
10208#
10209# * True : reuse HTTP connection.
10210# * False : closes the client socket connection explicitly.
10211#
10212# Related options:
10213#
10214# * tcp_keepidle
10215# (boolean value)
10216# Deprecated group/name - [DEFAULT]/wsgi_keep_alive
10217#keep_alive = true
10218
10219#
10220# This option specifies the timeout for client connections' socket
10221# operations.
10222# If an incoming connection is idle for this number of seconds it will
10223# be
10224# closed. It indicates timeout on individual read/writes on the socket
10225# connection. To wait forever set to 0.
10226# (integer value)
10227# Minimum value: 0
10228#client_socket_timeout = 900
10229
10230
10231[xenserver]
10232#
10233# XenServer options are used when the compute_driver is set to use
10234# XenServer (compute_driver=xenapi.XenAPIDriver).
10235#
10236# Must specify connection_url, connection_password and
10237# ovs_integration_bridge to
10238# use compute_driver=xenapi.XenAPIDriver.
10239
10240#
10241# From nova.conf
10242#
10243
10244#
10245# Number of seconds to wait for agent's reply to a request.
10246#
10247# Nova configures/performs certain administrative actions on a server
10248# with the
10249# help of an agent that's installed on the server. The communication
10250# between
10251# Nova and the agent is achieved via sharing messages, called records,
10252# over
10253# xenstore, a shared storage across all the domains on a Xenserver
10254# host.
10255# Operations performed by the agent on behalf of nova are: 'version',
10256# 'key_init', 'password', 'resetnetwork', 'inject_file', and
10257# 'agentupdate'.
10258#
10259# To perform one of the above operations, the xapi 'agent' plugin
10260# writes the
10261# command and its associated parameters to a certain location known to
10262# the domain
10263# and awaits response. On being notified of the message, the agent
10264# performs
10265# appropriate actions on the server and writes the result back to
10266# xenstore. This
10267# result is then read by the xapi 'agent' plugin to determine the
10268# success/failure
10269# of the operation.
10270#
10271# This config option determines how long the xapi 'agent' plugin shall
10272# wait to
10273# read the response off of xenstore for a given request/command. If
10274# the agent on
10275# the instance fails to write the result in this time period, the
10276# operation is
10277# considered to have timed out.
10278#
10279# Related options:
10280#
10281# * ``agent_version_timeout``
10282# * ``agent_resetnetwork_timeout``
10283#
10284# (integer value)
10285# Minimum value: 0
10286#agent_timeout = 30
10287
10288#
10289# Number of seconds to wait for agent's reply to version request.
10290#
10291# This indicates the amount of time xapi 'agent' plugin waits for the
10292# agent to
10293# respond to the 'version' request specifically. The generic timeout
10294# for agent
10295# communication ``agent_timeout`` is ignored in this case.
10296#
10297# During the build process the 'version' request is used to determine
10298# if the
10299# agent is available/operational to perform other requests such as
10300# 'resetnetwork', 'password', 'key_init' and 'inject_file'. If the
10301# 'version' call
10302# fails, the other configuration is skipped. So, this configuration
10303# option can
10304# also be interpreted as time in which agent is expected to be fully
10305# operational.
10306# (integer value)
10307# Minimum value: 0
10308#agent_version_timeout = 300
10309
10310#
10311# Number of seconds to wait for agent's reply to resetnetwork
10312# request.
10313#
10314# This indicates the amount of time xapi 'agent' plugin waits for the
10315# agent to
10316# respond to the 'resetnetwork' request specifically. The generic
10317# timeout for
10318# agent communication ``agent_timeout`` is ignored in this case.
10319# (integer value)
10320# Minimum value: 0
10321#agent_resetnetwork_timeout = 60
10322
10323#
10324# Path to locate guest agent on the server.
10325#
10326# Specifies the path in which the XenAPI guest agent should be
10327# located. If the
10328# agent is present, network configuration is not injected into the
10329# image.
10330#
10331# Related options:
10332#
10333# For this option to have an effect:
10334# * ``flat_injected`` should be set to ``True``
10335# * ``compute_driver`` should be set to ``xenapi.XenAPIDriver``
10336#
10337# (string value)
10338#agent_path = usr/sbin/xe-update-networking
10339
10340#
10341# Disables the use of XenAPI agent.
10342#
10343# This configuration option suggests whether the use of agent should
10344# be enabled
10345# or not regardless of what image properties are present. Image
10346# properties have
10347# an effect only when this is set to ``True``. Read description of
10348# config option
10349# ``use_agent_default`` for more information.
10350#
10351# Related options:
10352#
10353# * ``use_agent_default``
10354#
10355# (boolean value)
10356#disable_agent = false
10357
10358#
10359# Whether or not to use the agent by default when its usage is enabled
10360# but not
10361# indicated by the image.
10362#
10363# The use of XenAPI agent can be disabled altogether using the
10364# configuration
10365# option ``disable_agent``. However, if it is not disabled, the use of
10366# an agent
10367# can still be controlled by the image in use through one of its
10368# properties,
10369# ``xenapi_use_agent``. If this property is either not present or
10370# specified
10371# incorrectly on the image, the use of agent is determined by this
10372# configuration
10373# option.
10374#
10375# Note that if this configuration is set to ``True`` when the agent is
10376# not
10377# present, the boot times will increase significantly.
10378#
10379# Related options:
10380#
10381# * ``disable_agent``
10382#
10383# (boolean value)
10384#use_agent_default = false
10385
10386# Timeout in seconds for XenAPI login. (integer value)
10387# Minimum value: 0
10388#login_timeout = 10
10389
10390#
10391# Maximum number of concurrent XenAPI connections.
10392#
10393# In nova, multiple XenAPI requests can happen at a time.
10394# Configuring this option will parallelize access to the XenAPI
10395# session, which allows you to make concurrent XenAPI connections.
10396# (integer value)
10397# Minimum value: 1
10398#connection_concurrent = 5
10399
10400#
10401# Cache glance images locally.
10402#
10403# The value for this option must be chosen from the choices listed
10404# here. Configuring a value other than these will default to 'all'.
10405#
10406# Note: There is nothing that deletes these images.
10407#
10408# Possible values:
10409#
10410# * `all`: will cache all images.
10411# * `some`: will only cache images that have the
10412# image_property `cache_in_nova=True`.
10413# * `none`: turns off caching entirely.
10414# (string value)
10415# Possible values:
10416# all - <No description provided>
10417# some - <No description provided>
10418# none - <No description provided>
10419#cache_images = all
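#
# Example taken from the possible values above: cache only images that
# carry the image property ``cache_in_nova=True``.
#cache_images = some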
10420
10421#
10422# Compression level for images.
10423#
10424# By setting this option we can configure the gzip compression level.
10425# This option sets the GZIP environment variable before spawning tar -cz
10426# to force the compression level. It defaults to none, which means the
10427# GZIP environment variable is not set and the default (usually -6)
10428# is used.
10429#
10430# Possible values:
10431#
10432# * Range is 1-9, e.g., 9 for gzip -9, 9 being most
10433# compressed but most CPU intensive on dom0.
10434# * Any values out of this range will default to None.
10435# (integer value)
10436# Minimum value: 1
10437# Maximum value: 9
10438#image_compression_level = <None>
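#
# Example from the range described above: force the most aggressive gzip
# level, trading dom0 CPU time for smaller uploads.
#image_compression_level = 9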
10439
10440# Default OS type used when uploading an image to glance (string
10441# value)
10442#default_os_type = linux
10443
10444# Time in secs to wait for a block device to be created (integer
10445# value)
10446# Minimum value: 1
10447#block_device_creation_timeout = 10
10448{%- if compute.block_device_creation_timeout is defined %}
10449block_device_creation_timeout = {{ compute.block_device_creation_timeout }}
10450{%- endif %}
10451
10452#
10453# Maximum size in bytes of kernel or ramdisk images.
10454#
10455# Specifying the maximum size of kernel or ramdisk will avoid copying
10456# large files to dom0 and filling up /boot/guest.
10457# (integer value)
10458#max_kernel_ramdisk_size = 16777216
10459
10460#
10461# Filter for finding the SR to be used to install guest instances on.
10462#
10463# Possible values:
10464#
10465# * To use the Local Storage in default XenServer/XCP installations
10466# set this flag to other-config:i18n-key=local-storage.
10467# * To select an SR with a different matching criteria, you could
10468# set it to other-config:my_favorite_sr=true.
10469# * To fall back on the Default SR, as displayed by XenCenter,
10470# set this flag to: default-sr:true.
10471# (string value)
10472#sr_matching_filter = default-sr:true
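#
# Example straight from the possible values above: use the local storage SR
# of a default XenServer/XCP installation.
#sr_matching_filter = other-config:i18n-key=local-storage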
10473
10474#
10475# Whether to use sparse_copy for copying data on a resize down.
10476# (False will use standard dd). This speeds up resizes down
10477# considerably since large runs of zeros won't have to be rsynced.
10478# (boolean value)
10479#sparse_copy = true
10480
10481#
10482# Maximum number of retries to unplug VBD.
10483# If set to 0, should try once, no retries.
10484# (integer value)
10485# Minimum value: 0
10486#num_vbd_unplug_retries = 10
10487
10488#
10489# Name of network to use for booting iPXE ISOs.
10490#
10491# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
10492# This feature gives a means to roll your own image.
10493#
10494# By default this option is not set. Enable this option to
10495# boot an iPXE ISO.
10496#
10497# Related Options:
10498#
10499# * `ipxe_boot_menu_url`
10500# * `ipxe_mkisofs_cmd`
10501# (string value)
10502#ipxe_network_name = <None>
10503
10504#
10505# URL to the iPXE boot menu.
10506#
10507# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
10508# This feature gives a means to roll your own image.
10509#
10510# By default this option is not set. Enable this option to
10511# boot an iPXE ISO.
10512#
10513# Related Options:
10514#
10515# * `ipxe_network_name`
10516# * `ipxe_mkisofs_cmd`
10517# (string value)
10518#ipxe_boot_menu_url = <None>
10519
10520#
10521# Name and optionally path of the tool used for ISO image creation.
10522#
10523# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
10524# This feature gives a means to roll your own image.
10525#
10526# Note: By default `mkisofs` is not present in the Dom0, so the
10527# package can either be manually added to Dom0 or the
10528# `mkisofs` binary can be included in the image itself.
10529#
10530# Related Options:
10531#
10532# * `ipxe_network_name`
10533# * `ipxe_boot_menu_url`
10534# (string value)
10535#ipxe_mkisofs_cmd = mkisofs
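#
# A minimal illustrative iPXE stanza; the network name and menu URL are
# placeholders only, not values provided by this formula.
#ipxe_network_name = ipxe-boot
#ipxe_boot_menu_url = http://boot.example.com/menu.ipxe
#ipxe_mkisofs_cmd = mkisofs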
10536
10537#
10538# URL for connection to XenServer/Xen Cloud Platform. A special value
10539# of unix://local can be used to connect to the local unix socket.
10540#
10541# Possible values:
10542#
10543# * Any string that represents a URL. The connection_url is
10544# generally the management network IP address of the XenServer.
10545# * This option must be set if you chose the XenServer driver.
10546# (string value)
10547#connection_url = <None>
10548
10549# Username for connection to XenServer/Xen Cloud Platform (string
10550# value)
10551#connection_username = root
10552
10553# Password for connection to XenServer/Xen Cloud Platform (string
10554# value)
10555#connection_password = <None>
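#
# Hedged connection example; the management address and password below are
# placeholders and would normally be supplied through pillar data.
#connection_url = https://192.0.2.10
#connection_username = root
#connection_password = <secret>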
10556
10557#
10558# The interval used for polling of coalescing vhds.
10559#
10560# This is the interval after which the VHD coalesce task is
10561# performed, until it reaches the maximum attempts set by
10562# vhd_coalesce_max_attempts.
10563#
10564# Related options:
10565#
10566# * `vhd_coalesce_max_attempts`
10567# (floating point value)
10568# Minimum value: 0
10569#vhd_coalesce_poll_interval = 5.0
10570
10571#
10572# Ensure compute service is running on host XenAPI connects to.
10573# This option must be set to false if the 'independent_compute'
10574# option is set to true.
10575#
10576# Possible values:
10577#
10578# * Setting this option to true will make sure that compute service
10579# is running on the same host that is specified by connection_url.
10580# * Setting this option to false doesn't perform the check.
10581#
10582# Related options:
10583#
10584# * `independent_compute`
10585# (boolean value)
10586#check_host = true
10587
10588#
10589# Max number of times to poll for VHD to coalesce.
10590#
10591# This option determines the maximum number of attempts that can be
10592# made for coalescing the VHD before giving up.
10593#
10594# Related options:
10595#
10596# * `vhd_coalesce_poll_interval`
10597# (integer value)
10598# Minimum value: 0
10599#vhd_coalesce_max_attempts = 20
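#
# How the two related options combine: with the defaults below the driver
# polls every 5 seconds up to 20 times, i.e. it waits at most roughly 100
# seconds for the VHD chain to coalesce before giving up.
#vhd_coalesce_poll_interval = 5.0
#vhd_coalesce_max_attempts = 20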
10600
10601# Base path to the storage repository on the XenServer host. (string
10602# value)
10603#sr_base_path = /var/run/sr-mount
10604
10605#
10606# The iSCSI Target Host.
10607#
10608# This option represents the hostname or ip of the iSCSI Target.
10609# If the target host is not present in the connection information from
10610# the volume provider then the value from this option is taken.
10611#
10612# Possible values:
10613#
10614# * Any string that represents hostname/ip of Target.
10615# (unknown value)
10616#target_host = <None>
10617
10618#
10619# The iSCSI Target Port.
10620#
10621# This option represents the port of the iSCSI Target. If the
10622# target port is not present in the connection information from the
10623# volume provider then the value from this option is taken.
10624# (port value)
10625# Minimum value: 0
10626# Maximum value: 65535
10627#target_port = 3260
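#
# Illustrative fallback target (placeholder address); these values are only
# consulted when the volume provider omits them from the connection
# information.
#target_host = 192.0.2.20
#target_port = 3260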
10628
10629#
10630# Used to prevent attempts to attach VBDs locally, so Nova can
10631# be run in a VM on a different host.
10632#
10633# Related options:
10634#
10635# * ``CONF.flat_injected`` (Must be False)
10636# * ``CONF.xenserver.check_host`` (Must be False)
10637# * ``CONF.default_ephemeral_format`` (Must be unset or 'ext3')
10638# * Joining host aggregates (will error if attempted)
10639# * Swap disks for Windows VMs (will error if attempted)
10640# * Nova-based auto_configure_disk (will error if attempted)
10641# (boolean value)
10642#independent_compute = false
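#
# Hedged sketch of the pairing described above for running nova-compute in a
# VM on another host; check_host lives in this section, while flat_injected
# and default_ephemeral_format are [DEFAULT] options.
#independent_compute = true
#check_host = false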
10643
10644#
10645# Wait time for instances to go to running state.
10646#
10647# Provide an integer value representing time in seconds to set the
10648# wait time for an instance to go to running state.
10649#
10650# When a request to create an instance is received by nova-api and
10651# communicated to nova-compute, the creation of the instance occurs
10652# through interaction with Xen via XenAPI in the compute node. Once
10653# the node on which the instance(s) are to be launched is decided by
10654# nova-scheduler and the launch is triggered, a certain amount of wait
10655# time is involved until the instance(s) can become available and
10656# 'running'. This wait time is defined by running_timeout. If the
10657# instances do not go to running state within this specified wait
10658# time, the launch expires and the instance(s) are set to 'error'
10659# state.
10660# (integer value)
10661# Minimum value: 0
10662#running_timeout = 60
10663
10664# DEPRECATED:
10665# The XenAPI VIF driver using XenServer Network APIs.
10666#
10667# Provide a string value representing the VIF XenAPI vif driver to use
10668# for
10669# plugging virtual network interfaces.
10670#
10671# Xen configuration uses bridging within the backend domain to allow
10672# all VMs to appear on the network as individual hosts. Bridge
10673# interfaces are used to create a XenServer VLAN network in which
10674# the VIFs for the VM instances are plugged. If no VIF bridge driver
10675# is plugged, the bridge is not made available. This configuration
10676# option takes in a value for the VIF driver.
10677#
10678# Possible values:
10679#
10680# * nova.virt.xenapi.vif.XenAPIOpenVswitchDriver (default)
10681# * nova.virt.xenapi.vif.XenAPIBridgeDriver (deprecated)
10682#
10683# Related options:
10684#
10685# * ``vlan_interface``
10686# * ``ovs_integration_bridge``
10687# (string value)
10688# This option is deprecated for removal since 15.0.0.
10689# Its value may be silently ignored in the future.
10690# Reason:
10691# There are only two in-tree vif drivers for XenServer.
10692# XenAPIBridgeDriver is for
10693# nova-network which is deprecated and XenAPIOpenVswitchDriver is for
10694# Neutron
10695# which is the default configuration for Nova since the 15.0.0 Ocata
10696# release. In
10697# the future the "use_neutron" configuration option will be used to
10698# determine
10699# which vif driver to use.
10700#vif_driver = nova.virt.xenapi.vif.XenAPIOpenVswitchDriver
10701
10702#
10703# Dom0 plugin driver used to handle image uploads.
10704#
10705# Provide a string value representing a plugin driver required to
10706# handle the image uploading to GlanceStore.
10707#
10708# Images, and snapshots from XenServer need to be uploaded to the data
10709# store for use. image_upload_handler takes in a value for the Dom0
10710# plugin driver. This driver is then called to upload images to the
10711# GlanceStore.
10712# (string value)
10713#image_upload_handler = nova.virt.xenapi.image.glance.GlanceStore
10714
10715#
10716# Number of seconds to wait for SR to settle if the VDI
10717# does not exist when first introduced.
10718#
10719# Some SRs, particularly iSCSI connections, are slow to see the VDIs
10720# right after they get introduced. Setting this option to a
10721# time interval makes the SR wait for that time period
10722# before raising a 'VDI not found' exception.
10723# (integer value)
10724# Minimum value: 0
10725#introduce_vdi_retry_wait = 20
10726
10727#
10728# The name of the integration Bridge that is used with xenapi
10729# when connecting with Open vSwitch.
10730#
10731# Note: The value of this config option is dependent on the
10732# environment, therefore this configuration value must be set
10733# accordingly if you are using XenAPI.
10734#
10735# Possible values:
10736#
10737# * Any string that represents a bridge name.
10738# (string value)
10739#ovs_integration_bridge = <None>
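#
# Illustrative value only; the actual bridge name depends on the Open
# vSwitch configuration of the environment (placeholder below).
#ovs_integration_bridge = xapi1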
10740
10741#
10742# When adding a new host to a pool, this will append a --force flag to
10743# the
10744# command, forcing hosts to join a pool, even if they have different
10745# CPUs.
10746#
10747# Since XenServer version 5.6 it is possible to create a pool of hosts
10748# that have
10749# different CPU capabilities. To accommodate CPU differences,
10750# XenServer limited the
10751# features it uses to determine CPU compatibility to only the ones
10752# that are
10753# exposed by the CPU, and added support for CPU masking.
10754# Despite this effort to level differences between CPUs, it is still
10755# possible
10756# that adding a new host will fail, so the option to force the join was
10757# introduced.
10758# (boolean value)
10759#use_join_force = true
10760
10761#
10762# Publicly visible name for this console host.
10763#
10764# Possible values:
10765#
10766# * Current hostname (default) or any string representing hostname.
10767# (string value)
10768#console_public_hostname = <current_hostname>
10769
10770
10771[xvp]
10772#
10773# Configuration options for XVP.
10774#
10775# xvp (Xen VNC Proxy) is a proxy server providing password-protected
10776# VNC-based
10777# access to the consoles of virtual machines hosted on Citrix
10778# XenServer.
10779
10780#
10781# From nova.conf
10782#
10783
10784# XVP conf template (string value)
10785#console_xvp_conf_template = $pybasedir/nova/console/xvp.conf.template
10786
10787# Generated XVP conf file (string value)
10788#console_xvp_conf = /etc/xvp.conf
10789
10790# XVP master process pid file (string value)
10791#console_xvp_pid = /var/run/xvp.pid
10792
10793# XVP log file (string value)
10794#console_xvp_log = /var/log/xvp.log
10795
10796# Port for XVP to multiplex VNC connections on (port value)
10797# Minimum value: 0
10798# Maximum value: 65535
10799#console_xvp_multiplex_port = 5900
10800
10801[matchmaker_redis]
10802{#- include "oslo_templates/oslo/_matchmaker_redis.conf" #}
10803
10804[oslo_messaging_notifications]
10805{%- set _data = compute.notification %}
10806{%- include "oslo_templates/files/queens/oslo/messaging/_notifications.conf" %}
10807
10808{%- if compute.message_queue is defined %}
10809{%- set _data = compute.message_queue %}
10810{%- if _data.engine == 'rabbitmq' %}
10811 {%- set messaging_engine = 'rabbit' %}
10812{%- else %}
10813 {%- set messaging_engine = _data.engine %}
10814{%- endif %}
10815[oslo_messaging_{{ messaging_engine }}]
10816{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': compute.cacert_file}) %}{% endif %}
10817{%- include "oslo_templates/files/queens/oslo/messaging/_" + messaging_engine + ".conf" %}
10818{%- endif %}
10819
10820[oslo_policy]
10821{%- if compute.policy is defined %}
10822{%- set _data = compute.policy %}
10823{%- include "oslo_templates/files/queens/oslo/_policy.conf" %}
10824{%- endif %}
10825
10826[database]
10827{%- set _data = compute.database %}
10828{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': compute.cacert_file}) %}{% endif %}
10829{%- include "oslo_templates/files/queens/oslo/_database.conf" %}
10830
10831[oslo_middleware]
10832{%- set _data = compute %}
10833{%- include "oslo_templates/files/queens/oslo/_middleware.conf" %}
10834
10835[keystone_authtoken]
10836{%- set _data = compute.identity %}
10837{%- if 'cacert_file' not in _data.keys() %}{% do _data.update({'cacert_file': compute.cacert_file}) %}{% endif %}
10838{%- set auth_type = _data.get('auth_type', 'password') %}
10839{%- include "oslo_templates/files/queens/keystonemiddleware/_auth_token.conf" %}
10840{%- include "oslo_templates/files/queens/keystoneauth/_type_" + auth_type + ".conf" %}
10841