{%- from "nova/map.jinja" import compute,compute_driver_mapping with context %}
[DEFAULT]
3
4#
5# From nova.conf
6#
7compute_manager=nova.compute.manager.ComputeManager
8network_device_mtu=65000
9use_neutron = True
10security_group_api=neutron
11image_service=nova.image.glance.GlanceImageService
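# The ``compute`` dictionary used throughout this template is imported
# from nova/map.jinja and is normally fed from pillar. A hedged sketch
# of the pillar layout assumed by the lookups below (nova:compute
# nesting assumed; values are illustrative only):
#
#   nova:
#     compute:
#       compute_driver: libvirt.LibvirtDriver
#       instances_path: /var/lib/nova/instances
#       reserved_host_memory_mb: 512
#       heal_instance_info_cache_interval: 60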
12
13#
14# Availability zone for internal services.
15#
16# This option determines the availability zone for the various
17# internal nova
18# services, such as 'nova-scheduler', 'nova-conductor', etc.
19#
20# Possible values:
21#
22# * Any string representing an existing availability zone name.
23# (string value)
24#internal_service_availability_zone = internal
25
26#
27# Default availability zone for compute services.
28#
29# This option determines the default availability zone for 'nova-
30# compute'
31# services, which will be used if the service(s) do not belong to
32# aggregates with
33# availability zone metadata.
34#
35# Possible values:
36#
37# * Any string representing an existing availability zone name.
38# (string value)
39#default_availability_zone = nova
40
41#
42# Default availability zone for instances.
43#
44# This option determines the default availability zone for instances,
45# which will
46# be used when a user does not specify one when creating an instance.
47# The
48# instance(s) will be bound to this availability zone for their
49# lifetime.
50#
51# Possible values:
52#
53# * Any string representing an existing availability zone name.
54# * None, which means that the instance can move from one availability
55# zone to
56# another during its lifetime if it is moved from one compute node
57# to another.
58# (string value)
59#default_schedule_zone = <None>
60
61# Length of generated instance admin passwords. (integer value)
62# Minimum value: 0
63#password_length = 12
64
65#
# Time period to generate instance usages for. It is possible to
# define an optional offset to a given period by appending the @
# character followed by a number defining the offset.
71#
72# Possible values:
73#
# * period, example: ``hour``, ``day``, ``month`` or ``year``
# * period with offset, example: ``month@15`` will result in monthly
#   audits starting on the 15th day of the month.
78# (string value)
79#instance_usage_audit_period = month
80{% if pillar.ceilometer is defined %}
81instance_usage_audit = True
82instance_usage_audit_period = hour
83{%- endif %}
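# The two settings above are rendered only when a ``ceilometer`` pillar
# is present. A hedged sketch of a pillar that would trigger them (any
# top-level ``ceilometer`` key is enough; the layout shown is
# illustrative):
#
#   ceilometer:
#     agent:
#       enabled: true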
84
85#
86# Start and use a daemon that can run the commands that need to be run
87# with
88# root privileges. This option is usually enabled on nodes that run
89# nova compute
90# processes.
91# (boolean value)
92#use_rootwrap_daemon = false
93
94#
95# Path to the rootwrap configuration file.
96#
97# Goal of the root wrapper is to allow a service-specific unprivileged
98# user to
99# run a number of actions as the root user in the safest manner
100# possible.
101# The configuration file used here must match the one defined in the
102# sudoers
103# entry.
104# (string value)
105rootwrap_config = /etc/nova/rootwrap.conf
106
107# Explicitly specify the temporary working directory. (string value)
108#tempdir = <None>
109
110# DEPRECATED:
111# Determine if monkey patching should be applied.
112#
113# Related options:
114#
115# * ``monkey_patch_modules``: This must have values set for this
116# option to
117# have any effect
118# (boolean value)
119# This option is deprecated for removal since 17.0.0.
120# Its value may be silently ignored in the future.
121# Reason:
122# Monkey patching nova is not tested, not supported, and is a barrier
123# for interoperability.
124#monkey_patch = false
125
126# DEPRECATED:
127# List of modules/decorators to monkey patch.
128#
129# This option allows you to patch a decorator for all functions in
130# specified
131# modules.
132#
133# Possible values:
134#
135# * nova.compute.api:nova.notifications.notify_decorator
136# * [...]
137#
138# Related options:
139#
140# * ``monkey_patch``: This must be set to ``True`` for this option to
141# have any effect
142# (list value)
143# This option is deprecated for removal since 17.0.0.
144# Its value may be silently ignored in the future.
145# Reason:
146# Monkey patching nova is not tested, not supported, and is a barrier
147# for interoperability.
148#monkey_patch_modules = nova.compute.api:nova.notifications.notify_decorator
149
150#
151# Defines which driver to use for controlling virtualization.
152#
153# Possible values:
154#
155# * ``libvirt.LibvirtDriver``
156# * ``xenapi.XenAPIDriver``
157# * ``fake.FakeDriver``
158# * ``ironic.IronicDriver``
159# * ``vmwareapi.VMwareVCDriver``
160# * ``hyperv.HyperVDriver``
161# * ``powervm.PowerVMDriver``
162# (string value)
163#compute_driver = <None>
164compute_driver = {{ compute.get('compute_driver', 'libvirt.LibvirtDriver') }}
165
166#
167# Allow destination machine to match source for resize. Useful when
168# testing in single-host environments. By default it is not allowed
169# to resize to the same host. Setting this option to true will add
170# the same host to the destination options. Also set to true
171# if you allow the ServerGroupAffinityFilter and need to resize.
172# (boolean value)
173#allow_resize_to_same_host = false
174allow_resize_to_same_host = true
175
176#
177# Image properties that should not be inherited from the instance
178# when taking a snapshot.
179#
180# This option gives an opportunity to select which image-properties
181# should not be inherited by newly created snapshots.
182#
183# Possible values:
184#
185# * A comma-separated list whose item is an image property. Usually
186# only
187# the image properties that are only needed by base images can be
188# included
189# here, since the snapshots that are created from the base images
190# don't
191# need them.
192# * Default list: cache_in_nova, bittorrent,
193# img_signature_hash_method,
194# img_signature, img_signature_key_type,
195# img_signature_certificate_uuid
196#
197# (list value)
198#non_inheritable_image_properties = cache_in_nova,bittorrent,img_signature_hash_method,img_signature,img_signature_key_type,img_signature_certificate_uuid
199
200# DEPRECATED:
201# When creating multiple instances with a single request using the
202# os-multiple-create API extension, this template will be used to
203# build
204# the display name for each instance. The benefit is that the
205# instances
206# end up with different hostnames. Example display names when creating
# two VMs: name-1, name-2.
208#
209# Possible values:
210#
211# * Valid keys for the template are: name, uuid, count.
212# (string value)
213# This option is deprecated for removal since 15.0.0.
214# Its value may be silently ignored in the future.
215# Reason:
216# This config changes API behaviour. All changes in API behaviour
217# should be
218# discoverable.
219#multi_instance_display_name_template = %(name)s-%(count)d
220
221#
222# Maximum number of devices that will result in a local image being
223# created on the hypervisor node.
224#
225# A negative number means unlimited. Setting max_local_block_devices
226# to 0 means that any request that attempts to create a local disk
# will fail. This option is meant to limit the number of local disks
# (the root local disk that results from --image being used, plus any
# other ephemeral and swap disks). 0 does not mean that images will be
# automatically converted to volumes and instances booted from volumes
# - it just means that all requests that attempt to create a local
# disk will fail.
233#
234# Possible values:
235#
236# * 0: Creating a local disk is not allowed.
# * Negative number: Allows an unlimited number of local disks.
# * Positive number: Allows only that many local disks.
#   (Default value is 3).
240# (integer value)
241#max_local_block_devices = 3
242
243#
244# A comma-separated list of monitors that can be used for getting
245# compute metrics. You can use the alias/name from the setuptools
246# entry points for nova.compute.monitors.* namespaces. If no
247# namespace is supplied, the "cpu." namespace is assumed for
248# backwards-compatibility.
249#
250# NOTE: Only one monitor per namespace (For example: cpu) can be
251# loaded at
252# a time.
253#
254# Possible values:
255#
256# * An empty list will disable the feature (Default).
257# * An example value that would enable both the CPU and NUMA memory
258# bandwidth monitors that use the virt driver variant:
259#
260# compute_monitors = cpu.virt_driver, numa_mem_bw.virt_driver
261# (list value)
262#compute_monitors =
263
264#
265# The default format an ephemeral_volume will be formatted with on
266# creation.
267#
268# Possible values:
269#
270# * ``ext2``
271# * ``ext3``
272# * ``ext4``
273# * ``xfs``
274# * ``ntfs`` (only for Windows guests)
275# (string value)
276#default_ephemeral_format = <None>
277
278#
279# Determine if instance should boot or fail on VIF plugging timeout.
280#
281# Nova sends a port update to Neutron after an instance has been
282# scheduled,
283# providing Neutron with the necessary information to finish setup of
284# the port.
285# Once completed, Neutron notifies Nova that it has finished setting
286# up the
287# port, at which point Nova resumes the boot of the instance since
288# network
289# connectivity is now supposed to be present. A timeout will occur if
290# the reply
291# is not received after a given interval.
292#
293# This option determines what Nova does when the VIF plugging timeout
294# event
295# happens. When enabled, the instance will error out. When disabled,
296# the
297# instance will continue to boot on the assumption that the port is
298# ready.
299#
300# Possible values:
301#
302# * True: Instances should fail after VIF plugging timeout
303# * False: Instances should continue booting after VIF plugging
304# timeout
305# (boolean value)
306vif_plugging_is_fatal = {{ compute.get('vif_plugging_is_fatal', 'true') }}
307
308#
309# Timeout for Neutron VIF plugging event message arrival.
310#
311# Number of seconds to wait for Neutron vif plugging events to
312# arrive before continuing or failing (see 'vif_plugging_is_fatal').
313#
314# Related options:
315#
316# * vif_plugging_is_fatal - If ``vif_plugging_timeout`` is set to zero
317# and
318# ``vif_plugging_is_fatal`` is False, events should not be expected
319# to
320# arrive at all.
321# (integer value)
322# Minimum value: 0
323vif_plugging_timeout = {{ compute.get('vif_plugging_timeout', '300') }}
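# Both VIF plugging settings above come from the ``compute`` map, with
# defaults of true and 300. A hedged pillar sketch for overriding them
# (nova:compute placement assumed from this formula's map.jinja):
#
#   nova:
#     compute:
#       vif_plugging_is_fatal: false
#       vif_plugging_timeout: 0
#
# As noted above, with a timeout of 0 and is_fatal set to false,
# Neutron events are not expected to arrive at all.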
324
325# Path to '/etc/network/interfaces' template.
326#
327# The path to a template file for the '/etc/network/interfaces'-style
328# file, which
329# will be populated by nova and subsequently used by cloudinit. This
330# provides a
331# method to configure network connectivity in environments without a
332# DHCP server.
333#
334# The template will be rendered using Jinja2 template engine, and
335# receive a
336# top-level key called ``interfaces``. This key will contain a list of
337# dictionaries, one for each interface.
338#
# Refer to the cloudinit documentation for more information:
340#
341# https://cloudinit.readthedocs.io/en/latest/topics/datasources.html
342#
343# Possible values:
344#
345# * A path to a Jinja2-formatted template for a Debian
346# '/etc/network/interfaces'
# file. This applies even if using a non-Debian-derived guest.
348#
349# Related options:
350#
351# * ``flat_inject``: This must be set to ``True`` to ensure nova
352# embeds network
353# configuration information in the metadata provided through the
354# config drive.
355# (string value)
356#injected_network_template = $pybasedir/nova/virt/interfaces.template
357
358#
359# The image preallocation mode to use.
360#
361# Image preallocation allows storage for instance images to be
362# allocated up front
363# when the instance is initially provisioned. This ensures immediate
364# feedback is
365# given if enough space isn't available. In addition, it should
366# significantly
367# improve performance on writes to new blocks and may even improve I/O
368# performance to prewritten blocks due to reduced fragmentation.
369#
370# Possible values:
371#
372# * "none" => no storage provisioning is done up front
373# * "space" => storage is fully allocated at instance start
374# (string value)
375# Possible values:
376# none - <No description provided>
377# space - <No description provided>
378#preallocate_images = none
{%- if compute.preallocate_images is defined %}
preallocate_images = {{ compute.preallocate_images }}
{%- endif %}

#
384# Enable use of copy-on-write (cow) images.
385#
386# QEMU/KVM allow the use of qcow2 as backing files. By disabling this,
387# backing files will not be used.
388# (boolean value)
389#use_cow_images = true
390{%- if compute.image is defined and compute.image.use_cow is defined %}
391use_cow_images = {{ compute.image.use_cow }}
392{%- endif %}
393
394#
395# Force conversion of backing images to raw format.
396#
397# Possible values:
398#
399# * True: Backing image files will be converted to raw image format
400# * False: Backing image files will not be converted
401#
402# Related options:
403#
404# * ``compute_driver``: Only the libvirt driver uses this option.
405# (boolean value)
406#force_raw_images = true
407force_raw_images={{ compute.get('image', {}).get('force_raw', True)|lower }}
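# A hedged pillar sketch covering the image handling options rendered
# above (the nova:compute:image keys mirror the lookups in this
# template; values are illustrative):
#
#   nova:
#     compute:
#       image:
#         use_cow: true
#         force_raw: false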
408
409#
410# Name of the mkfs commands for ephemeral device.
411#
412# The format is <os_type>=<mkfs command>
413# (multi valued)
414#virt_mkfs =
415
416#
417# Enable resizing of filesystems via a block device.
418#
419# If enabled, attempt to resize the filesystem by accessing the image
420# over a
421# block device. This is done by the host and may not be necessary if
422# the image
423# contains a recent version of cloud-init. Possible mechanisms require
424# the nbd
425# driver (for qcow and raw), or loop (for raw).
426# (boolean value)
427#resize_fs_using_block_device = false
428
429# Amount of time, in seconds, to wait for NBD device start up.
430# (integer value)
431# Minimum value: 0
432#timeout_nbd = 10
{%- if compute.timeout_nbd is defined %}
timeout_nbd = {{ compute.timeout_nbd }}
{%- endif %}

#
438# Location of cached images.
439#
440# This is NOT the full path - just a folder name relative to
441# '$instances_path'.
442# For per-compute-host cached images, set to '_base_$my_ip'
443# (string value)
444#image_cache_subdirectory_name = _base
445
446# Should unused base images be removed? (boolean value)
447#remove_unused_base_images = true
448
449#
450# Unused unresized base images younger than this will not be removed.
451# (integer value)
452remove_unused_original_minimum_age_seconds = 86400
453
454#
455# Generic property to specify the pointer type.
456#
457# Input devices allow interaction with a graphical framebuffer. For
458# example to provide a graphic tablet for absolute cursor movement.
459#
460# If set, the 'hw_pointer_model' image property takes precedence over
461# this configuration option.
462#
463# Possible values:
464#
465# * None: Uses default behavior provided by drivers (mouse on PS2 for
466# libvirt x86)
467# * ps2mouse: Uses relative movement. Mouse connected by PS2
468# * usbtablet: Uses absolute movement. Tablet connect by USB
469#
470# Related options:
471#
472# * usbtablet must be configured with VNC enabled or SPICE enabled and
473# SPICE
474# agent disabled. When used with libvirt the instance mode should be
475# configured as HVM.
476# (string value)
477# Possible values:
478# <None> - <No description provided>
479# ps2mouse - <No description provided>
480# usbtablet - <No description provided>
481#pointer_model = usbtablet
482
483#
484# Defines which physical CPUs (pCPUs) can be used by instance
485# virtual CPUs (vCPUs).
486#
487# Possible values:
488#
489# * A comma-separated list of physical CPU numbers that virtual CPUs
490# can be
491# allocated to by default. Each element should be either a single
492# CPU number,
493# a range of CPU numbers, or a caret followed by a CPU number to be
494# excluded from a previous range. For example:
495#
496# vcpu_pin_set = "4-12,^8,15"
497# (string value)
498#vcpu_pin_set = <None>
499{%- if compute.vcpu_pin_set is defined %}
500vcpu_pin_set={{ compute.vcpu_pin_set }}
501{%- endif %}
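# A hedged pillar sketch that pins instance vCPUs to host CPUs 4-12
# (excluding 8) plus 15, matching the example format above
# (nova:compute placement assumed):
#
#   nova:
#     compute:
#       vcpu_pin_set: "4-12,^8,15"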
502
503#
504# Number of huge/large memory pages to reserved per NUMA host cell.
505#
506# Possible values:
507#
508# * A list of valid key=value which reflect NUMA node ID, page size
509# (Default unit is KiB) and number of pages to be reserved.
510#
511# reserved_huge_pages = node:0,size:2048,count:64
512# reserved_huge_pages = node:1,size:1GB,count:1
513#
514# In this example we are reserving on NUMA node 0 64 pages of 2MiB
515# and on NUMA node 1 1 page of 1GiB.
516# (dict value)
517#reserved_huge_pages = <None>
518
519#
520# Amount of disk resources in MB to make them always available to
521# host. The
522# disk usage gets reported back to the scheduler from nova-compute
523# running
524# on the compute nodes. To prevent the disk resources from being
525# considered
526# as available, this option can be used to reserve disk space for that
527# host.
528#
529# Possible values:
530#
531# * Any positive integer representing amount of disk in MB to reserve
532# for the host.
533# (integer value)
534# Minimum value: 0
535#reserved_host_disk_mb = 0
536
537#
538# Amount of memory in MB to reserve for the host so that it is always
539# available
540# to host processes. The host resources usage is reported back to the
541# scheduler
542# continuously from nova-compute running on the compute node. To
543# prevent the host
544# memory from being considered as available, this option is used to
545# reserve
546# memory for the host.
547#
548# Possible values:
549#
550# * Any positive integer representing amount of memory in MB to
551# reserve
552# for the host.
553# (integer value)
554# Minimum value: 0
555#reserved_host_memory_mb = 512
556reserved_host_memory_mb = {{ compute.get('reserved_host_memory_mb', '512') }}
557
558#
559# Number of physical CPUs to reserve for the host. The host resources
560# usage is
561# reported back to the scheduler continuously from nova-compute
562# running on the
563# compute node. To prevent the host CPU from being considered as
564# available,
565# this option is used to reserve random pCPU(s) for the host.
566#
567# Possible values:
568#
569# * Any positive integer representing number of physical CPUs to
570# reserve
571# for the host.
572# (integer value)
573# Minimum value: 0
574#reserved_host_cpus = 0
575
576#
577# This option helps you specify virtual CPU to physical CPU allocation
578# ratio.
579#
580# From Ocata (15.0.0) this is used to influence the hosts selected by
581# the Placement API. Note that when Placement is used, the CoreFilter
582# is redundant, because the Placement API will have already filtered
583# out hosts that would have failed the CoreFilter.
584#
585# This configuration specifies ratio for CoreFilter which can be set
586# per compute node. For AggregateCoreFilter, it will fall back to this
587# configuration value if no per-aggregate setting is found.
588#
589# NOTE: This can be set per-compute, or if set to 0.0, the value
590# set on the scheduler node(s) or compute node(s) will be used
591# and defaulted to 16.0.
592#
593# NOTE: As of the 16.0.0 Pike release, this configuration option is
594# ignored
595# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
596#
597# Possible values:
598#
599# * Any valid positive integer or float value
600# (floating point value)
601# Minimum value: 0
602#cpu_allocation_ratio = 0.0
603{%- if compute.cpu_allocation_ratio is defined %}
604cpu_allocation_ratio = {{ compute.cpu_allocation_ratio }}
605{%- else %}
606#cpu_allocation_ratio=0.0
607{%- endif %}
608
609#
610# This option helps you specify virtual RAM to physical RAM
611# allocation ratio.
612#
613# From Ocata (15.0.0) this is used to influence the hosts selected by
614# the Placement API. Note that when Placement is used, the RamFilter
615# is redundant, because the Placement API will have already filtered
616# out hosts that would have failed the RamFilter.
617#
618# This configuration specifies ratio for RamFilter which can be set
619# per compute node. For AggregateRamFilter, it will fall back to this
620# configuration value if no per-aggregate setting found.
621#
622# NOTE: This can be set per-compute, or if set to 0.0, the value
623# set on the scheduler node(s) or compute node(s) will be used and
624# defaulted to 1.5.
625#
626# NOTE: As of the 16.0.0 Pike release, this configuration option is
627# ignored
628# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
629#
630# Possible values:
631#
632# * Any valid positive integer or float value
633# (floating point value)
634# Minimum value: 0
635#ram_allocation_ratio = 0.0
636{%- if compute.ram_allocation_ratio is defined %}
637ram_allocation_ratio = {{ compute.ram_allocation_ratio }}
638{%- else %}
639#ram_allocation_ratio=0.0
640{%- endif %}
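# A hedged pillar sketch for the two overcommit ratios rendered above
# (nova:compute placement assumed; the values are illustrative, not
# recommendations):
#
#   nova:
#     compute:
#       cpu_allocation_ratio: 8.0
#       ram_allocation_ratio: 1.0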
641
642#
643# This option helps you specify virtual disk to physical disk
644# allocation ratio.
645#
646# From Ocata (15.0.0) this is used to influence the hosts selected by
647# the Placement API. Note that when Placement is used, the DiskFilter
648# is redundant, because the Placement API will have already filtered
649# out hosts that would have failed the DiskFilter.
650#
651# A ratio greater than 1.0 will result in over-subscription of the
652# available physical disk, which can be useful for more
653# efficiently packing instances created with images that do not
654# use the entire virtual disk, such as sparse or compressed
655# images. It can be set to a value between 0.0 and 1.0 in order
656# to preserve a percentage of the disk for uses other than
657# instances.
658#
659# NOTE: This can be set per-compute, or if set to 0.0, the value
660# set on the scheduler node(s) or compute node(s) will be used and
661# defaulted to 1.0.
662#
663# NOTE: As of the 16.0.0 Pike release, this configuration option is
664# ignored
665# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
666#
667# Possible values:
668#
669# * Any valid positive integer or float value
670# (floating point value)
671# Minimum value: 0
672#disk_allocation_ratio = 0.0
673
674#
675# Console proxy host to be used to connect to instances on this host.
676# It is the
677# publicly visible name for the console host.
678#
679# Possible values:
680#
681# * Current hostname (default) or any string representing hostname.
682# (string value)
683#console_host = <current_hostname>
684
685#
686# Name of the network to be used to set access IPs for instances. If
687# there are
688# multiple IPs to choose from, an arbitrary one will be chosen.
689#
690# Possible values:
691#
692# * None (default)
693# * Any string representing network name.
694# (string value)
695#default_access_ip_network_name = <None>
696
697#
698# Whether to batch up the application of IPTables rules during a host
699# restart
700# and apply all at the end of the init phase.
701# (boolean value)
702#defer_iptables_apply = false
703
704#
705# Specifies where instances are stored on the hypervisor's disk.
706# It can point to locally attached storage or a directory on NFS.
707#
708# Possible values:
709#
710# * $state_path/instances where state_path is a config option that
711# specifies
712# the top-level directory for maintaining nova's state. (default) or
713# Any string representing directory path.
714# (string value)
715instances_path = {{ compute.instances_path }}
716
717#
718# This option enables periodic compute.instance.exists notifications.
719# Each
720# compute node must be configured to generate system usage data. These
721# notifications are consumed by OpenStack Telemetry service.
722# (boolean value)
723#instance_usage_audit = false
724
725#
# Maximum number of 1 second retries in live_migration. It specifies
# the number of retries against iptables when it complains. This
# happens when a user continuously sends live-migration requests to
# the same host, leading to concurrent requests to iptables.
733#
734# Possible values:
735#
736# * Any positive integer representing retry count.
737# (integer value)
738# Minimum value: 0
739#live_migration_retry_count = 30
740
741#
742# This option specifies whether to start guests that were running
743# before the
744# host rebooted. It ensures that all of the instances on a Nova
745# compute node
746# resume their state each time the compute node boots or restarts.
747# (boolean value)
748resume_guests_state_on_host_boot = {{ compute.get('resume_guests_state_on_host_boot', True) }}
749
750#
751# Number of times to retry network allocation. It is required to
752# attempt network
753# allocation retries if the virtual interface plug fails.
754#
755# Possible values:
756#
757# * Any positive integer representing retry count.
758# (integer value)
759# Minimum value: 0
760#network_allocate_retries = 0
761
762#
763# Limits the maximum number of instance builds to run concurrently by
764# nova-compute. Compute service can attempt to build an infinite
765# number of
766# instances, if asked to do so. This limit is enforced to avoid
767# building
768# unlimited instance concurrently on a compute node. This value can be
769# set
770# per compute node.
771#
772# Possible Values:
773#
774# * 0 : treated as unlimited.
775# * Any positive integer representing maximum concurrent builds.
776# (integer value)
777# Minimum value: 0
778#max_concurrent_builds = 10
779
780#
781# Maximum number of live migrations to run concurrently. This limit is
782# enforced
783# to avoid outbound live migrations overwhelming the host/network and
784# causing
785# failures. It is not recommended that you change this unless you are
786# very sure
787# that doing so is safe and stable in your environment.
788#
789# Possible values:
790#
791# * 0 : treated as unlimited.
792# * Negative value defaults to 0.
793# * Any positive integer representing maximum number of live
794# migrations
795# to run concurrently.
796# (integer value)
797#max_concurrent_live_migrations = 1
798{%- if compute.max_concurrent_live_migrations is defined %}
799max_concurrent_live_migrations = {{ compute.max_concurrent_live_migrations }}
800{%- endif %}
801
802#
803# Number of times to retry block device allocation on failures.
804# Starting with
805# Liberty, Cinder can use image volume cache. This may help with block
806# device
807# allocation performance. Look at the cinder
808# image_volume_cache_enabled
809# configuration option.
810#
811# Possible values:
812#
813# * 60 (default)
814# * If value is 0, then one attempt is made.
815# * Any negative value is treated as 0.
816# * For any value > 0, total attempts are (value + 1)
817# (integer value)
818block_device_allocate_retries = {{ compute.get('block_device_allocate_retries', '600') }}
819
820#
821# Number of greenthreads available for use to sync power states.
822#
823# This option can be used to reduce the number of concurrent requests
824# made to the hypervisor or system with real instance power states
825# for performance reasons, for example, with Ironic.
826#
827# Possible values:
828#
829# * Any positive integer representing greenthreads count.
830# (integer value)
831#sync_power_state_pool_size = 1000
832
833#
834# Number of seconds to wait between runs of the image cache manager.
835#
836# Possible values:
837# * 0: run at the default rate.
838# * -1: disable
839# * Any other value
840# (integer value)
841# Minimum value: -1
842image_cache_manager_interval = 0
843
844#
845# Interval to pull network bandwidth usage info.
846#
847# Not supported on all hypervisors. If a hypervisor doesn't support
848# bandwidth
849# usage, it will not get the info in the usage events.
850#
851# Possible values:
852#
853# * 0: Will run at the default periodic interval.
854# * Any value < 0: Disables the option.
855# * Any positive integer in seconds.
856# (integer value)
857#bandwidth_poll_interval = 600
858
859#
860# Interval to sync power states between the database and the
861# hypervisor.
862#
863# The interval that Nova checks the actual virtual machine power state
864# and the power state that Nova has in its database. If a user powers
865# down their VM, Nova updates the API to report the VM has been
866# powered down. Should something turn on the VM unexpectedly,
867# Nova will turn the VM back off to keep the system in the expected
868# state.
869#
870# Possible values:
871#
872# * 0: Will run at the default periodic interval.
873# * Any value < 0: Disables the option.
874# * Any positive integer in seconds.
875#
876# Related options:
877#
878# * If ``handle_virt_lifecycle_events`` in workarounds_group is
879# false and this option is negative, then instances that get out
880# of sync between the hypervisor and the Nova database will have
881# to be synchronized manually.
882# (integer value)
883#sync_power_state_interval = 600
884
885#
886# Interval between instance network information cache updates.
887#
888# Number of seconds after which each compute node runs the task of
889# querying Neutron for all of its instances networking information,
890# then updates the Nova db with that information. Nova will never
# update its cache if this option is set to 0. If we don't update the
892# cache, the metadata service and nova-api endpoints will be proxying
893# incorrect network data about the instance. So, it is not recommended
894# to set this option to 0.
895#
896# Possible values:
897#
898# * Any positive integer in seconds.
899# * Any value <=0 will disable the sync. This is not recommended.
900# (integer value)
901#heal_instance_info_cache_interval = 60
902heal_instance_info_cache_interval = {{ compute.heal_instance_info_cache_interval }}
903
904#
905# Interval for reclaiming deleted instances.
906#
907# A value greater than 0 will enable SOFT_DELETE of instances.
908# This option decides whether the server to be deleted will be put
909# into
910# the SOFT_DELETED state. If this value is greater than 0, the deleted
911# server will not be deleted immediately, instead it will be put into
912# a queue until it's too old (deleted time greater than the value of
913# reclaim_instance_interval). The server can be recovered from the
914# delete queue by using the restore action. If the deleted server
915# remains
916# longer than the value of reclaim_instance_interval, it will be
917# deleted by a periodic task in the compute service automatically.
918#
919# Note that this option is read from both the API and compute nodes,
920# and
921# must be set globally otherwise servers could be put into a soft
922# deleted
923# state in the API and never actually reclaimed (deleted) on the
924# compute
925# node.
926#
927# Possible values:
928#
929# * Any positive integer(in seconds) greater than 0 will enable
930# this option.
931# * Any value <=0 will disable the option.
932# (integer value)
933#reclaim_instance_interval = 0
{%- if compute.reclaim_instance_interval is defined %}
reclaim_instance_interval = {{ compute.reclaim_instance_interval }}
{%- endif %}

#
939# Interval for gathering volume usages.
940#
941# This option updates the volume usage cache for every
942# volume_usage_poll_interval number of seconds.
943#
944# Possible values:
945#
946# * Any positive integer(in seconds) greater than 0 will enable
947# this option.
948# * Any value <=0 will disable the option.
949# (integer value)
950#volume_usage_poll_interval = 0
951
952#
953# Interval for polling shelved instances to offload.
954#
955# The periodic task runs for every shelved_poll_interval number
956# of seconds and checks if there are any shelved instances. If it
957# finds a shelved instance, based on the 'shelved_offload_time' config
958# value it offloads the shelved instances. Check
959# 'shelved_offload_time'
960# config option description for details.
961#
962# Possible values:
963#
964# * Any value <= 0: Disables the option.
965# * Any positive integer in seconds.
966#
967# Related options:
968#
969# * ``shelved_offload_time``
970# (integer value)
971#shelved_poll_interval = 3600
972
973#
974# Time before a shelved instance is eligible for removal from a host.
975#
976# By default this option is set to 0 and the shelved instance will be
977# removed from the hypervisor immediately after shelve operation.
978# Otherwise, the instance will be kept for the value of
979# shelved_offload_time(in seconds) so that during the time period the
980# unshelve action will be faster, then the periodic task will remove
981# the instance from hypervisor after shelved_offload_time passes.
982#
983# Possible values:
984#
985# * 0: Instance will be immediately offloaded after being
986# shelved.
987# * Any value < 0: An instance will never offload.
988# * Any positive integer in seconds: The instance will exist for
989# the specified number of seconds before being offloaded.
990# (integer value)
991#shelved_offload_time = 0
992
993#
994# Interval for retrying failed instance file deletes.
995#
996# This option depends on 'maximum_instance_delete_attempts'.
997# This option specifies how often to retry deletes whereas
998# 'maximum_instance_delete_attempts' specifies the maximum number
999# of retry attempts that can be made.
1000#
1001# Possible values:
1002#
1003# * 0: Will run at the default periodic interval.
1004# * Any value < 0: Disables the option.
1005# * Any positive integer in seconds.
1006#
1007# Related options:
1008#
1009# * ``maximum_instance_delete_attempts`` from instance_cleaning_opts
1010# group.
1011# (integer value)
1012#instance_delete_interval = 300
1013
1014#
1015# Interval (in seconds) between block device allocation retries on
1016# failures.
1017#
1018# This option allows the user to specify the time interval between
1019# consecutive retries. 'block_device_allocate_retries' option
1020# specifies
1021# the maximum number of retries.
1022#
1023# Possible values:
1024#
1025# * 0: Disables the option.
1026# * Any positive integer in seconds enables the option.
1027#
1028# Related options:
1029#
1030# * ``block_device_allocate_retries`` in compute_manager_opts group.
1031# (integer value)
1032# Minimum value: 0
1033block_device_allocate_retries_interval = {{ compute.get('block_device_allocate_retries_interval', '10') }}
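# A worked example with the values rendered above
# (block_device_allocate_retries = 600 and
# block_device_allocate_retries_interval = 10, unless overridden in
# pillar): nova-compute makes roughly 601 attempts at 10 second
# intervals, i.e. it waits up to about 100 minutes for a block device
# to become available before failing the build.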
1034
1035#
1036# Interval between sending the scheduler a list of current instance
1037# UUIDs to
1038# verify that its view of instances is in sync with nova.
1039#
1040# If the CONF option 'scheduler_tracks_instance_changes' is
1041# False, the sync calls will not be made. So, changing this option
1042# will
1043# have no effect.
1044#
1045# If the out of sync situations are not very common, this interval
1046# can be increased to lower the number of RPC messages being sent.
1047# Likewise, if sync issues turn out to be a problem, the interval
1048# can be lowered to check more frequently.
1049#
1050# Possible values:
1051#
1052# * 0: Will run at the default periodic interval.
1053# * Any value < 0: Disables the option.
1054# * Any positive integer in seconds.
1055#
1056# Related options:
1057#
1058# * This option has no impact if ``scheduler_tracks_instance_changes``
1059# is set to False.
1060# (integer value)
1061#scheduler_instance_sync_interval = 120
1062
1063#
1064# Interval for updating compute resources.
1065#
1066# This option specifies how often the update_available_resources
1067# periodic task should run. A number less than 0 means to disable the
1068# task completely. Leaving this at the default of 0 will cause this to
1069# run at the default periodic interval. Setting it to any positive
1070# value will cause it to run at approximately that number of seconds.
1071#
1072# Possible values:
1073#
1074# * 0: Will run at the default periodic interval.
1075# * Any value < 0: Disables the option.
1076# * Any positive integer in seconds.
1077# (integer value)
1078#update_resources_interval = 0
1079
1080#
1081# Time interval after which an instance is hard rebooted
1082# automatically.
1083#
1084# When doing a soft reboot, it is possible that a guest kernel is
1085# completely hung in a way that causes the soft reboot task
1086# to not ever finish. Setting this option to a time period in seconds
1087# will automatically hard reboot an instance if it has been stuck
1088# in a rebooting state longer than N seconds.
1089#
1090# Possible values:
1091#
1092# * 0: Disables the option (default).
1093# * Any positive integer in seconds: Enables the option.
1094# (integer value)
1095# Minimum value: 0
1096#reboot_timeout = 0
1097
1098#
1099# Maximum time in seconds that an instance can take to build.
1100#
1101# If this timer expires, instance status will be changed to ERROR.
1102# Enabling this option will make sure an instance will not be stuck
1103# in BUILD state for a longer period.
1104#
1105# Possible values:
1106#
1107# * 0: Disables the option (default)
1108# * Any positive integer in seconds: Enables the option.
1109# (integer value)
1110# Minimum value: 0
{%- if compute.instance_build_timeout is defined %}
instance_build_timeout = {{ compute.instance_build_timeout }}
{%- else %}
#instance_build_timeout = 0
{%- endif %}

#
1118# Interval to wait before un-rescuing an instance stuck in RESCUE.
1119#
1120# Possible values:
1121#
1122# * 0: Disables the option (default)
1123# * Any positive integer in seconds: Enables the option.
1124# (integer value)
1125# Minimum value: 0
1126#rescue_timeout = 0
1127
1128#
1129# Automatically confirm resizes after N seconds.
1130#
1131# Resize functionality will save the existing server before resizing.
1132# After the resize completes, user is requested to confirm the resize.
1133# The user has the opportunity to either confirm or revert all
1134# changes. Confirm resize removes the original server and changes
1135# server status from resized to active. Setting this option to a time
1136# period (in seconds) will automatically confirm the resize if the
1137# server is in resized state longer than that time.
1138#
1139# Possible values:
1140#
1141# * 0: Disables the option (default)
1142# * Any positive integer in seconds: Enables the option.
1143# (integer value)
1144# Minimum value: 0
1145#resize_confirm_window = 0
1146
1147#
# Total time to wait in seconds for an instance to perform a clean
1149# shutdown.
1150#
1151# It determines the overall period (in seconds) a VM is allowed to
# perform a clean shutdown. While performing stop, rescue, shelve and
# rebuild operations, configuring this option gives the VM a chance
1154# to perform a controlled shutdown before the instance is powered off.
1155# The default timeout is 60 seconds.
1156#
1157# The timeout value can be overridden on a per image basis by means
1158# of os_shutdown_timeout that is an image metadata setting allowing
1159# different types of operating systems to specify how much time they
1160# need to shut down cleanly.
1161#
1162# Possible values:
1163#
1164# * Any positive integer in seconds (default value is 60).
1165# (integer value)
1166# Minimum value: 1
1167#shutdown_timeout = 60
1168
1169#
1170# The compute service periodically checks for instances that have been
1171# deleted in the database but remain running on the compute node. The
1172# above option enables action to be taken when such instances are
1173# identified.
1174#
1175# Possible values:
1176#
# * reap: Powers down the instances and deletes them (default)
1178# * log: Logs warning message about deletion of the resource
1179# * shutdown: Powers down instances and marks them as non-
1180# bootable which can be later used for debugging/analysis
1181# * noop: Takes no action
1182#
1183# Related options:
1184#
1185# * running_deleted_instance_poll_interval
1186# * running_deleted_instance_timeout
1187# (string value)
1188# Possible values:
1189# noop - <No description provided>
1190# log - <No description provided>
1191# shutdown - <No description provided>
1192# reap - <No description provided>
1193#running_deleted_instance_action = reap
1194
1195#
1196# Time interval in seconds to wait between runs for the clean up
1197# action.
# If set to 0, the above check will be disabled. If
# "running_deleted_instance_action" is set to "log" or "reap", a value
# greater than 0 must be set.
1202#
1203# Possible values:
1204#
1205# * Any positive integer in seconds enables the option.
1206# * 0: Disables the option.
1207# * 1800: Default value.
1208#
1209# Related options:
1210#
1211# * running_deleted_instance_action
1212# (integer value)
1213#running_deleted_instance_poll_interval = 1800
1214
1215#
1216# Time interval in seconds to wait for the instances that have
1217# been marked as deleted in database to be eligible for cleanup.
1218#
1219# Possible values:
1220#
1221# * Any positive integer in seconds(default is 0).
1222#
1223# Related options:
1224#
1225# * "running_deleted_instance_action"
1226# (integer value)
1227#running_deleted_instance_timeout = 0
1228
1229#
1230# The number of times to attempt to reap an instance's files.
1231#
1232# This option specifies the maximum number of retry attempts
1233# that can be made.
1234#
1235# Possible values:
1236#
1237# * Any positive integer defines how many attempts are made.
1238# * Any value <=0 means no delete attempts occur, but you should use
1239# ``instance_delete_interval`` to disable the delete attempts.
1240#
1241# Related options:
1242# * ``instance_delete_interval`` in interval_opts group can be used to
1243# disable
1244# this option.
1245# (integer value)
1246#maximum_instance_delete_attempts = 5
1247
1248#
1249# Sets the scope of the check for unique instance names.
1250#
1251# The default doesn't check for unique names. If a scope for the name
1252# check is
1253# set, a launch of a new instance or an update of an existing instance
1254# with a
1255# duplicate name will result in an ''InstanceExists'' error. The
1256# uniqueness is
1257# case-insensitive. Setting this option can increase the usability for
1258# end
1259# users as they don't have to distinguish among instances with the
1260# same name
1261# by their IDs.
1262#
1263# Possible values:
1264#
1265# * '': An empty value means that no uniqueness check is done and
1266# duplicate
1267# names are possible.
1268# * "project": The instance name check is done only for instances
1269# within the
1270# same project.
1271# * "global": The instance name check is done for all instances
1272# regardless of
1273# the project.
1274# (string value)
1275# Possible values:
1276# '' - <No description provided>
1277# project - <No description provided>
1278# global - <No description provided>
1279#osapi_compute_unique_server_name_scope =
1280
1281#
1282# Enable new nova-compute services on this host automatically.
1283#
1284# When a new nova-compute service starts up, it gets
1285# registered in the database as an enabled service. Sometimes it can
1286# be useful
# to register new compute services in disabled state and then enable
1288# them at a
1289# later point in time. This option only sets this behavior for nova-
1290# compute
1291# services, it does not auto-disable other services like nova-
1292# conductor,
1293# nova-scheduler, nova-consoleauth, or nova-osapi_compute.
1294#
1295# Possible values:
1296#
1297# * ``True``: Each new compute service is enabled as soon as it
1298# registers itself.
1299# * ``False``: Compute services must be enabled via an os-services
1300# REST API call
1301# or with the CLI with ``nova service-enable <hostname> <binary>``,
1302# otherwise
1303# they are not ready to use.
1304# (boolean value)
1305#enable_new_services = true
1306
1307#
1308# Template string to be used to generate instance names.
1309#
1310# This template controls the creation of the database name of an
1311# instance. This
1312# is *not* the display name you enter when creating an instance (via
1313# Horizon
1314# or CLI). For a new deployment it is advisable to change the default
1315# value
1316# (which uses the database autoincrement) to another value which makes
1317# use
1318# of the attributes of an instance, like ``instance-%(uuid)s``. If you
1319# already have instances in your deployment when you change this, your
1320# deployment will break.
1321#
1322# Possible values:
1323#
1324# * A string which either uses the instance database ID (like the
1325# default)
1326# * A string with a list of named database columns, for example
1327# ``%(id)d``
1328# or ``%(uuid)s`` or ``%(hostname)s``.
1329#
1330# Related options:
1331#
1332# * not to be confused with: ``multi_instance_display_name_template``
1333# (string value)
1334#instance_name_template = instance-%08x
1335
1336#
1337# Number of times to retry live-migration before failing.
1338#
1339# Possible values:
1340#
1341# * If == -1, try until out of hosts (default)
1342# * If == 0, only try once, no retries
1343# * Integer greater than 0
1344# (integer value)
1345# Minimum value: -1
1346#migrate_max_retries = -1
1347
1348#
1349# Configuration drive format
1350#
1351# Configuration drive format that will contain metadata attached to
1352# the
1353# instance when it boots.
1354#
1355# Possible values:
1356#
1357# * iso9660: A file system image standard that is widely supported
1358# across
1359# operating systems. NOTE: Mind the libvirt bug
1360# (https://bugs.launchpad.net/nova/+bug/1246201) - If your
1361# hypervisor
1362# driver is libvirt, and you want live migrate to work without
1363# shared storage,
1364# then use VFAT.
1365# * vfat: For legacy reasons, you can configure the configuration
1366# drive to
1367# use VFAT format instead of ISO 9660.
1368#
1369# Related options:
1370#
1371# * This option is meaningful when one of the following alternatives
1372# occur:
1373# 1. force_config_drive option set to 'true'
1374# 2. the REST API call to create the instance contains an enable
1375# flag for
1376# config drive option
1377# 3. the image used to create the instance requires a config drive,
1378# this is defined by img_config_drive property for that image.
1379# * A compute node running Hyper-V hypervisor can be configured to
1380# attach
1381# configuration drive as a CD drive. To attach the configuration
1382# drive as a CD
1383# drive, set config_drive_cdrom option at hyperv section, to true.
1384# (string value)
1385# Possible values:
1386# iso9660 - <No description provided>
1387# vfat - <No description provided>
1388#config_drive_format = iso9660
1389config_drive_format={{ compute.get('config_drive_format', compute.get('config_drive', {}).get('format', 'vfat')) }}
1390
1391#
1392# Force injection to take place on a config drive
1393#
1394# When this option is set to true configuration drive functionality
1395# will be
1396# forced enabled by default, otherwise user can still enable
1397# configuration
1398# drives via the REST API or image metadata properties.
1399#
1400# Possible values:
1401#
1402# * True: Force to use of configuration drive regardless the user's
1403# input in the
1404# REST API call.
1405# * False: Do not force use of configuration drive. Config drives can
1406# still be
1407# enabled via the REST API or image metadata properties.
1408#
1409# Related options:
1410#
1411# * Use the 'mkisofs_cmd' flag to set the path where you install the
1412# genisoimage program. If genisoimage is in same path as the
1413# nova-compute service, you do not need to set this flag.
1414# * To use configuration drive with Hyper-V, you must set the
1415# 'mkisofs_cmd' value to the full path to an mkisofs.exe
1416# installation.
1417# Additionally, you must set the qemu_img_cmd value in the hyperv
1418# configuration section to the full path to an qemu-img command
1419# installation.
1420# (boolean value)
1421#force_config_drive = false
1422force_config_drive={{ compute.get('config_drive', {}).get('forced', True)|lower }}
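# A hedged pillar sketch for the two config drive settings rendered
# above (the nova:compute:config_drive keys mirror the lookups in this
# template):
#
#   nova:
#     compute:
#       config_drive:
#         format: iso9660
#         forced: false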
1423
1424#
1425# Name or path of the tool used for ISO image creation
1426#
1427# Use the mkisofs_cmd flag to set the path where you install the
1428# genisoimage
1429# program. If genisoimage is on the system path, you do not need to
1430# change
1431# the default value.
1432#
1433# To use configuration drive with Hyper-V, you must set the
1434# mkisofs_cmd value
1435# to the full path to an mkisofs.exe installation. Additionally, you
1436# must set
1437# the qemu_img_cmd value in the hyperv configuration section to the
1438# full path
1439# to an qemu-img command installation.
1440#
1441# Possible values:
1442#
1443# * Name of the ISO image creator program, in case it is in the same
1444# directory
1445# as the nova-compute service
1446# * Path to ISO image creator program
1447#
1448# Related options:
1449#
1450# * This option is meaningful when config drives are enabled.
1451# * To use configuration drive with Hyper-V, you must set the
1452# qemu_img_cmd
1453# value in the hyperv configuration section to the full path to an
1454# qemu-img
1455# command installation.
1456# (string value)
1457#mkisofs_cmd = genisoimage
1458
1459# DEPRECATED: The driver to use for database access (string value)
1460# This option is deprecated for removal since 13.0.0.
1461# Its value may be silently ignored in the future.
1462#db_driver = nova.db
1463
1464# DEPRECATED:
1465# Default flavor to use for the EC2 API only.
1466# The Nova API does not support a default flavor.
1467# (string value)
1468# This option is deprecated for removal since 14.0.0.
1469# Its value may be silently ignored in the future.
1470# Reason: The EC2 API is deprecated.
1471#default_flavor = m1.small
1472
1473#
1474# The IP address which the host is using to connect to the management
1475# network.
1476#
1477# Possible values:
1478#
1479# * String with valid IP address. Default is IPv4 address of this
1480# host.
1481#
1482# Related options:
1483#
1484# * metadata_host
1485# * my_block_storage_ip
1486# * routing_source_ip
1487# * vpn_ip
1488# (string value)
1489#my_ip = <host_ipv4>
1490{%- if compute.my_ip is defined %}
1491my_ip={{ compute.my_ip }}
1492{%- endif %}
1493
1494#
1495# The IP address which is used to connect to the block storage
1496# network.
1497#
1498# Possible values:
1499#
1500# * String with valid IP address. Default is IP address of this host.
1501#
1502# Related options:
1503#
1504# * my_ip - if my_block_storage_ip is not set, then my_ip value is
1505# used.
1506# (string value)
1507#my_block_storage_ip = $my_ip
1508
1509#
1510# Hostname, FQDN or IP address of this host.
1511#
1512# Used as:
1513#
1514# * the oslo.messaging queue name for nova-compute worker
1515# * we use this value for the binding_host sent to neutron. This means
1516# if you use
1517# a neutron agent, it should have the same value for host.
1518# * cinder host attachment information
1519#
1520# Must be valid within AMQP key.
1521#
1522# Possible values:
1523#
1524# * String with hostname, FQDN or IP address. Default is hostname of
1525# this host.
1526# (string value)
1527#host = <current_hostname>
1528{%- if compute.host is defined %}
1529host={{ compute.host }}
1530{%- endif %}
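# A hedged pillar sketch for the optional host identity overrides above
# (nova:compute placement assumed; the address and hostname are
# examples only):
#
#   nova:
#     compute:
#       my_ip: 10.0.0.15
#       host: cmp001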
1531
1532# DEPRECATED:
1533# This option is a list of full paths to one or more configuration
1534# files for
1535# dhcpbridge. In most cases the default path of '/etc/nova/nova-
1536# dhcpbridge.conf'
1537# should be sufficient, but if you have special needs for configuring
1538# dhcpbridge,
1539# you can change or add to this list.
1540#
1541# Possible values
1542#
1543# * A list of strings, where each string is the full path to a
1544# dhcpbridge
1545# configuration file.
1546# (multi valued)
1547# This option is deprecated for removal since 16.0.0.
1548# Its value may be silently ignored in the future.
1549# Reason:
1550# nova-network is deprecated, as are any related configuration
1551# options.
1552#dhcpbridge_flagfile = /etc/nova/nova.conf
1553
1554# DEPRECATED:
1555# The location where the network configuration files will be kept. The
1556# default is
1557# the 'networks' directory off of the location where nova's Python
1558# module is
1559# installed.
1560#
1561# Possible values
1562#
1563# * A string containing the full path to the desired configuration
1564# directory
1565# (string value)
1566# This option is deprecated for removal since 16.0.0.
1567# Its value may be silently ignored in the future.
1568# Reason:
1569# nova-network is deprecated, as are any related configuration
1570# options.
1571#networks_path = $state_path/networks
1572
1573# DEPRECATED:
1574# This is the name of the network interface for public IP addresses.
1575# The default
1576# is 'eth0'.
1577#
1578# Possible values:
1579#
1580# * Any string representing a network interface name
1581# (string value)
1582# This option is deprecated for removal since 16.0.0.
1583# Its value may be silently ignored in the future.
1584# Reason:
1585# nova-network is deprecated, as are any related configuration
1586# options.
1587#public_interface = eth0
1588
1589# DEPRECATED:
1590# The location of the binary nova-dhcpbridge. By default it is the
1591# binary named
1592# 'nova-dhcpbridge' that is installed with all the other nova
1593# binaries.
1594#
1595# Possible values:
1596#
1597# * Any string representing the full path to the binary for dhcpbridge
1598# (string value)
1599# This option is deprecated for removal since 16.0.0.
1600# Its value may be silently ignored in the future.
1601# Reason:
1602# nova-network is deprecated, as are any related configuration
1603# options.
1604#dhcpbridge = $bindir/nova-dhcpbridge
1605
1606# DEPRECATED:
1607# The public IP address of the network host.
1608#
1609# This is used when creating an SNAT rule.
1610#
1611# Possible values:
1612#
1613# * Any valid IP address
1614#
1615# Related options:
1616#
1617# * ``force_snat_range``
1618# (string value)
1619# This option is deprecated for removal since 16.0.0.
1620# Its value may be silently ignored in the future.
1621# Reason:
1622# nova-network is deprecated, as are any related configuration
1623# options.
1624#routing_source_ip = $my_ip
1625
1626# DEPRECATED:
1627# The lifetime of a DHCP lease, in seconds. The default is 86400 (one
1628# day).
1629#
1630# Possible values:
1631#
1632# * Any positive integer value.
1633# (integer value)
1634# Minimum value: 1
1635# This option is deprecated for removal since 16.0.0.
1636# Its value may be silently ignored in the future.
1637# Reason:
1638# nova-network is deprecated, as are any related configuration
1639# options.
1640#dhcp_lease_time = 86400
1641
1642# DEPRECATED:
1643# Despite the singular form of the name of this option, it is actually
1644# a list of
1645# zero or more server addresses that dnsmasq will use for DNS
1646# nameservers. If
1647# this is not empty, dnsmasq will not read /etc/resolv.conf, but will
1648# only use
1649# the servers specified in this option. If the option
1650# use_network_dns_servers is
1651# True, the dns1 and dns2 servers from the network will be appended to
1652# this list,
1653# and will be used as DNS servers, too.
1654#
1655# Possible values:
1656#
1657# * A list of strings, where each string is either an IP address or a
1658# FQDN.
1659#
1660# Related options:
1661#
1662# * ``use_network_dns_servers``
1663# (multi valued)
1664# This option is deprecated for removal since 16.0.0.
1665# Its value may be silently ignored in the future.
1666# Reason:
1667# nova-network is deprecated, as are any related configuration
1668# options.
1669#dns_server =
1670
1671# DEPRECATED:
1672# When this option is set to True, the dns1 and dns2 servers for the
1673# network
1674# specified by the user on boot will be used for DNS, as well as any
1675# specified in
1676# the `dns_server` option.
1677#
1678# Related options:
1679#
1680# * ``dns_server``
1681# (boolean value)
1682# This option is deprecated for removal since 16.0.0.
1683# Its value may be silently ignored in the future.
1684# Reason:
1685# nova-network is deprecated, as are any related configuration
1686# options.
1687#use_network_dns_servers = false
1688
1689# DEPRECATED:
1690# This option is a list of zero or more IP address ranges in your
1691# network's DMZ
1692# that should be accepted.
1693#
1694# Possible values:
1695#
1696# * A list of strings, each of which should be a valid CIDR.
1697# (list value)
1698# This option is deprecated for removal since 16.0.0.
1699# Its value may be silently ignored in the future.
1700# Reason:
1701# nova-network is deprecated, as are any related configuration
1702# options.
1703#dmz_cidr =
1704
1705# DEPRECATED:
1706# This is a list of zero or more IP ranges that traffic from the
1707# `routing_source_ip` will be SNATted to. If the list is empty, then
1708# no SNAT
1709# rules are created.
1710#
1711# Possible values:
1712#
1713# * A list of strings, each of which should be a valid CIDR.
1714#
1715# Related options:
1716#
1717# * ``routing_source_ip``
1718# (multi valued)
1719# This option is deprecated for removal since 16.0.0.
1720# Its value may be silently ignored in the future.
1721# Reason:
1722# nova-network is deprecated, as are any related configuration
1723# options.
1724#force_snat_range =
1725
1726# DEPRECATED:
1727# The path to the custom dnsmasq configuration file, if any.
1728#
1729# Possible values:
1730#
1731# * The full path to the configuration file, or an empty string if
1732# there is no
1733# custom dnsmasq configuration file.
1734# (string value)
1735# This option is deprecated for removal since 16.0.0.
1736# Its value may be silently ignored in the future.
1737# Reason:
1738# nova-network is deprecated, as are any related configuration
1739# options.
1740#dnsmasq_config_file =
1741
1742# DEPRECATED:
1743# This is the class used as the ethernet device driver for linuxnet
1744# bridge
1745# operations. The default value should be all you need for most cases,
1746# but if you
1747# wish to use a customized class, set this option to the full dot-
1748# separated
1749# import path for that class.
1750#
1751# Possible values:
1752#
1753# * Any string representing a dot-separated class path that Nova can
1754# import.
1755# (string value)
1756# This option is deprecated for removal since 16.0.0.
1757# Its value may be silently ignored in the future.
1758# Reason:
1759# nova-network is deprecated, as are any related configuration
1760# options.
1761#linuxnet_interface_driver = nova.network.linux_net.LinuxBridgeInterfaceDriver
1762
1763# DEPRECATED:
1764# The name of the Open vSwitch bridge that is used with linuxnet when
1765# connecting
1766# with Open vSwitch.
1767#
1768# Possible values:
1769#
1770# * Any string representing a valid bridge name.
1771# (string value)
1772# This option is deprecated for removal since 16.0.0.
1773# Its value may be silently ignored in the future.
1774# Reason:
1775# nova-network is deprecated, as are any related configuration
1776# options.
1777#linuxnet_ovs_integration_bridge = br-int
1778
1779#
1780# When True, when a device starts up, and upon binding floating IP
1781# addresses, arp
1782# messages will be sent to ensure that the arp caches on the compute
1783# hosts are
1784# up-to-date.
1785#
1786# Related options:
1787#
1788# * ``send_arp_for_ha_count``
1789# (boolean value)
1790#send_arp_for_ha = false
1791
1792#
1793# When arp messages are configured to be sent, they will be sent with
1794# the count
1795# set to the value of this option. Of course, if this is set to zero,
1796# no arp
1797# messages will be sent.
1798#
1799# Possible values:
1800#
1801# * Any integer greater than or equal to 0
1802#
1803# Related options:
1804#
1805# * ``send_arp_for_ha``
1806# (integer value)
1807#send_arp_for_ha_count = 3
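# Illustrative example only (values are not defaults): enabling the ARP
# updates described above together with the message count would look like:
#send_arp_for_ha = true
#send_arp_for_ha_count = 3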
1808
1809# DEPRECATED:
1810# When set to True, only the first nic of a VM will get its default
1811# gateway from
1812# the DHCP server.
1813# (boolean value)
1814# This option is deprecated for removal since 16.0.0.
1815# Its value may be silently ignored in the future.
1816# Reason:
1817# nova-network is deprecated, as are any related configuration
1818# options.
1819#use_single_default_gateway = false
1820
1821# DEPRECATED:
1822# One or more interfaces that bridges can forward traffic to. If any
1823# of the items
1824# in this list is the special keyword 'all', then all traffic will be
1825# forwarded.
1826#
1827# Possible values:
1828#
1829# * A list of zero or more interface names, or the word 'all'.
1830# (multi valued)
1831# This option is deprecated for removal since 16.0.0.
1832# Its value may be silently ignored in the future.
1833# Reason:
1834# nova-network is deprecated, as are any related configuration
1835# options.
1836#forward_bridge_interface = all
1837
1838#
1839# This option determines the IP address for the network metadata API
1840# server.
1841#
1842# This is really the client side of the metadata host equation that
1843# allows
1844# nova-network to find the metadata server when using default
1845# multi-host
1846# networking.
1847#
1848# Possible values:
1849#
1850# * Any valid IP address. The default is the address of the Nova API
1851# server.
1852#
1853# Related options:
1854#
1855# * ``metadata_port``
1856# (string value)
1857#metadata_host = $my_ip
1858
1859# DEPRECATED:
1860# This option determines the port used for the metadata API server.
1861#
1862# Related options:
1863#
1864# * ``metadata_host``
1865# (port value)
1866# Minimum value: 0
1867# Maximum value: 65535
1868# This option is deprecated for removal since 16.0.0.
1869# Its value may be silently ignored in the future.
1870# Reason:
1871# nova-network is deprecated, as are any related configuration
1872# options.
1873#metadata_port = 8775
1874
1875# DEPRECATED:
1876# This expression, if defined, will select any matching iptables rules
1877# and place
1878# them at the top when applying metadata changes to the rules.
1879#
1880# Possible values:
1881#
1882# * Any string representing a valid regular expression, or an empty
1883# string
1884#
1885# Related options:
1886#
1887# * ``iptables_bottom_regex``
1888# (string value)
1889# This option is deprecated for removal since 16.0.0.
1890# Its value may be silently ignored in the future.
1891# Reason:
1892# nova-network is deprecated, as are any related configuration
1893# options.
1894#iptables_top_regex =
1895
1896# DEPRECATED:
1897# This expression, if defined, will select any matching iptables rules
1898# and place
1899# them at the bottom when applying metadata changes to the rules.
1900#
1901# Possible values:
1902#
1903# * Any string representing a valid regular expression, or an empty
1904# string
1905#
1906# Related options:
1907#
1908# * iptables_top_regex
1909# (string value)
1910# This option is deprecated for removal since 16.0.0.
1911# Its value may be silently ignored in the future.
1912# Reason:
1913# nova-network is deprecated, as are any related configuration
1914# options.
1915#iptables_bottom_regex =
1916
1917# DEPRECATED:
1918# By default, packets that do not pass the firewall are DROPped. In
1919# many cases,
1920# though, an operator may find it more useful to change this from DROP
1921# to REJECT,
1922# so that the user issuing those packets may have a better idea as to
1923# what's
1924# going on, or LOGDROP in order to record the blocked traffic before
1925# DROPping.
1926#
1927# Possible values:
1928#
1929# * A string representing an iptables chain. The default is DROP.
1930# (string value)
1931# This option is deprecated for removal since 16.0.0.
1932# Its value may be silently ignored in the future.
1933# Reason:
1934# nova-network is deprecated, as are any related configuration
1935# options.
1936#iptables_drop_action = DROP
1937
1938# DEPRECATED:
1939# This option represents the period of time, in seconds, that the
1940# ovs_vsctl calls
1941# will wait for a response from the database before timing out. A
1942# setting of 0
1943# means that the utility should wait forever for a response.
1944#
1945# Possible values:
1946#
1947# * Any positive integer if a limited timeout is desired, or zero if
1948# the calls
1949# should wait forever for a response.
1950# (integer value)
1951# Minimum value: 0
1952# This option is deprecated for removal since 16.0.0.
1953# Its value may be silently ignored in the future.
1954# Reason:
1955# nova-network is deprecated, as are any related configuration
1956# options.
1957#ovs_vsctl_timeout = 120
1958
1959# DEPRECATED:
1960# This option is used mainly in testing to avoid calls to the
1961# underlying network
1962# utilities.
1963# (boolean value)
1964# This option is deprecated for removal since 16.0.0.
1965# Its value may be silently ignored in the future.
1966# Reason:
1967# nova-network is deprecated, as are any related configuration
1968# options.
1969#fake_network = false
1970
1971# DEPRECATED:
1972# This option determines the number of times to retry ebtables
1973# commands before
1974# giving up. The minimum number of retries is 1.
1975#
1976# Possible values:
1977#
1978# * Any positive integer
1979#
1980# Related options:
1981#
1982# * ``ebtables_retry_interval``
1983# (integer value)
1984# Minimum value: 1
1985# This option is deprecated for removal since 16.0.0.
1986# Its value may be silently ignored in the future.
1987# Reason:
1988# nova-network is deprecated, as are any related configuration
1989# options.
1990#ebtables_exec_attempts = 3
1991
1992# DEPRECATED:
1993# This option determines the time, in seconds, that the system will
1994# sleep in
1995# between ebtables retries. Note that each successive retry waits a
1996# multiple of
1997# this value, so for example, if this is set to the default of 1.0
1998# seconds, and
1999# ebtables_exec_attempts is 4, after the first failure, the system
2000# will sleep for
2001# 1 * 1.0 seconds, after the second failure it will sleep 2 * 1.0
2002# seconds, and
2003# after the third failure it will sleep 3 * 1.0 seconds.
2004#
2005# Possible values:
2006#
2007# * Any non-negative float or integer. Setting this to zero will
2008# result in no
2009# waiting between attempts.
2010#
2011# Related options:
2012#
2013# * ebtables_exec_attempts
2014# (floating point value)
2015# This option is deprecated for removal since 16.0.0.
2016# Its value may be silently ignored in the future.
2017# Reason:
2018# nova-network is deprecated, as are any related configuration
2019# options.
2020#ebtables_retry_interval = 1.0
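# Worked example (illustrative, using the defaults above): with
# ebtables_exec_attempts = 3 and ebtables_retry_interval = 1.0, a failing
# ebtables command is retried up to 3 times, sleeping 1 * 1.0 seconds after
# the first failure and 2 * 1.0 seconds after the second.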
2021
2022# DEPRECATED:
2023# Enable neutron as the backend for networking.
2024#
2025# Determine whether to use Neutron or Nova Network as the back end.
2026# Set to true
2027# to use neutron.
2028# (boolean value)
2029# This option is deprecated for removal since 15.0.0.
2030# Its value may be silently ignored in the future.
2031# Reason:
2032# nova-network is deprecated, as are any related configuration
2033# options.
2034#use_neutron = true
2035
2036#
2037# This option determines whether the network setup information is
2038# injected into
2039# the VM before it is booted. While it was originally designed to be
2040# used only
2041# by nova-network, it is also used by the vmware and xenapi virt
2042# drivers to
2043# control whether network information is injected into a VM. The
2044# libvirt virt
2045# driver also uses it when we use config_drive to configure network to
2046# control
2047# whether network information is injected into a VM.
2048# (boolean value)
2049#flat_injected = false
2050
2051# DEPRECATED:
2052# This option determines the bridge used for simple network interfaces
2053# when no
2054# bridge is specified in the VM creation request.
2055#
2056# Please note that this option is only used when using nova-network
2057# instead of
2058# Neutron in your deployment.
2059#
2060# Possible values:
2061#
2062# * Any string representing a valid network bridge, such as 'br100'
2063#
2064# Related options:
2065#
2066# * ``use_neutron``
2067# (string value)
2068# This option is deprecated for removal since 15.0.0.
2069# Its value may be silently ignored in the future.
2070# Reason:
2071# nova-network is deprecated, as are any related configuration
2072# options.
2073#flat_network_bridge = <None>
2074
2075# DEPRECATED:
2076# This is the address of the DNS server for a simple network. If this
2077# option is
2078# not specified, the default of '8.8.4.4' is used.
2079#
2080# Please note that this option is only used when using nova-network
2081# instead of
2082# Neutron in your deployment.
2083#
2084# Possible values:
2085#
2086# * Any valid IP address.
2087#
2088# Related options:
2089#
2090# * ``use_neutron``
2091# (string value)
2092# This option is deprecated for removal since 15.0.0.
2093# Its value may be silently ignored in the future.
2094# Reason:
2095# nova-network is deprecated, as are any related configuration
2096# options.
2097#flat_network_dns = 8.8.4.4
2098
2099# DEPRECATED:
2100# This option is the name of the virtual interface of the VM on which
2101# the bridge
2102# will be built. While it was originally designed to be used only by
2103# nova-network, it is also used by libvirt for the bridge interface
2104# name.
2105#
2106# Possible values:
2107#
2108# * Any valid virtual interface name, such as 'eth0'
2109# (string value)
2110# This option is deprecated for removal since 15.0.0.
2111# Its value may be silently ignored in the future.
2112# Reason:
2113# nova-network is deprecated, as are any related configuration
2114# options.
2115#flat_interface = <None>
2116
2117# DEPRECATED:
2118# This is the VLAN number used for private networks. Note that
2119# when creating
2120# the networks, if the specified number has already been assigned,
2121# nova-network
2122# will increment this number until it finds an available VLAN.
2123#
2124# Please note that this option is only used when using nova-network
2125# instead of
2126# Neutron in your deployment. It also will be ignored if the
2127# configuration option
2128# for `network_manager` is not set to the default of
2129# 'nova.network.manager.VlanManager'.
2130#
2131# Possible values:
2132#
2133# * Any integer between 1 and 4094. Values outside of that range will
2134# raise a
2135# ValueError exception.
2136#
2137# Related options:
2138#
2139# * ``network_manager``
2140# * ``use_neutron``
2141# (integer value)
2142# Minimum value: 1
2143# Maximum value: 4094
2144# This option is deprecated for removal since 15.0.0.
2145# Its value may be silently ignored in the future.
2146# Reason:
2147# nova-network is deprecated, as are any related configuration
2148# options.
2149#vlan_start = 100
2150
2151# DEPRECATED:
2152# This option is the name of the virtual interface of the VM on which
2153# the VLAN
2154# bridge will be built. While it was originally designed to be used
2155# only by
2156# nova-network, it is also used by libvirt and xenapi for the bridge
2157# interface
2158# name.
2159#
2160# Please note that this setting will be ignored in nova-network if the
2161# configuration option for `network_manager` is not set to the default
2162# of
2163# 'nova.network.manager.VlanManager'.
2164#
2165# Possible values:
2166#
2167# * Any valid virtual interface name, such as 'eth0'
2168# (string value)
2169# This option is deprecated for removal since 15.0.0.
2170# Its value may be silently ignored in the future.
2171# Reason:
2172# nova-network is deprecated, as are any related configuration
2173# options. While
2174# this option has an effect when using neutron, it incorrectly
2175# override the value
2176# provided by neutron and should therefore not be used.
2177#vlan_interface = <None>
2178
2179# DEPRECATED:
2180# This option represents the number of networks to create if not
2181# explicitly
2182# specified when the network is created. The only time this is used is
2183# if a CIDR
2184# is specified, but an explicit network_size is not. In that case, the
2185# subnets
2186# are created by dividing the IP address space of the CIDR by
2187# num_networks. The
2188# resulting subnet sizes cannot be larger than the configuration
2189# option
2190# `network_size`; in that event, they are reduced to `network_size`,
2191# and a
2192# warning is logged.
2193#
2194# Please note that this option is only used when using nova-network
2195# instead of
2196# Neutron in your deployment.
2197#
2198# Possible values:
2199#
2200# * Any positive integer is technically valid, although there are
2201# practical
2202# limits based upon available IP address space and virtual
2203# interfaces.
2204#
2205# Related options:
2206#
2207# * ``use_neutron``
2208# * ``network_size``
2209# (integer value)
2210# Minimum value: 1
2211# This option is deprecated for removal since 15.0.0.
2212# Its value may be silently ignored in the future.
2213# Reason:
2214# nova-network is deprecated, as are any related configuration
2215# options.
2216#num_networks = 1
2217
2218# DEPRECATED:
2219# This option is no longer used since the /os-cloudpipe API was
2220# removed in the
2221# 16.0.0 Pike release. This is the public IP address for the cloudpipe
2222# VPN
2223# servers. It defaults to the IP address of the host.
2224#
2225# Please note that this option is only used when using nova-network
2226# instead of
2227# Neutron in your deployment. It also will be ignored if the
2228# configuration option
2229# for `network_manager` is not set to the default of
2230# 'nova.network.manager.VlanManager'.
2231#
2232# Possible values:
2233#
2234# * Any valid IP address. The default is ``$my_ip``, the IP address of
2235# the VM.
2236#
2237# Related options:
2238#
2239# * ``network_manager``
2240# * ``use_neutron``
2241# * ``vpn_start``
2242# (string value)
2243# This option is deprecated for removal since 15.0.0.
2244# Its value may be silently ignored in the future.
2245# Reason:
2246# nova-network is deprecated, as are any related configuration
2247# options.
2248#vpn_ip = $my_ip
2249
2250# DEPRECATED:
2251# This is the port number to use as the first VPN port for private
2252# networks.
2253#
2254# Please note that this option is only used when using nova-network
2255# instead of
2256# Neutron in your deployment. It also will be ignored if the
2257# configuration option
2258# for `network_manager` is not set to the default of
2259# 'nova.network.manager.VlanManager', or if you specify a value for the
2260# 'vpn_start'
2261# parameter when creating a network.
2262#
2263# Possible values:
2264#
2265# * Any integer representing a valid port number. The default is 1000.
2266#
2267# Related options:
2268#
2269# * ``use_neutron``
2270# * ``vpn_ip``
2271# * ``network_manager``
2272# (port value)
2273# Minimum value: 0
2274# Maximum value: 65535
2275# This option is deprecated for removal since 15.0.0.
2276# Its value may be silently ignored in the future.
2277# Reason:
2278# nova-network is deprecated, as are any related configuration
2279# options.
2280#vpn_start = 1000
2281
2282# DEPRECATED:
2283# This option determines the number of addresses in each private
2284# subnet.
2285#
2286# Please note that this option is only used when using nova-network
2287# instead of
2288# Neutron in your deployment.
2289#
2290# Possible values:
2291#
2292# * Any positive integer that is less than or equal to the available
2293# network
2294# size. Note that if you are creating multiple networks, they must
2295# all fit in
2296# the available IP address space. The default is 256.
2297#
2298# Related options:
2299#
2300# * ``use_neutron``
2301# * ``num_networks``
2302# (integer value)
2303# Minimum value: 1
2304# This option is deprecated for removal since 15.0.0.
2305# Its value may be silently ignored in the future.
2306# Reason:
2307# nova-network is deprecated, as are any related configuration
2308# options.
2309#network_size = 256
2310
2311# DEPRECATED:
2312# This option determines the fixed IPv6 address block when creating a
2313# network.
2314#
2315# Please note that this option is only used when using nova-network
2316# instead of
2317# Neutron in your deployment.
2318#
2319# Possible values:
2320#
2321# * Any valid IPv6 CIDR
2322#
2323# Related options:
2324#
2325# * ``use_neutron``
2326# (string value)
2327# This option is deprecated for removal since 15.0.0.
2328# Its value may be silently ignored in the future.
2329# Reason:
2330# nova-network is deprecated, as are any related configuration
2331# options.
2332#fixed_range_v6 = fd00::/48
2333
2334# DEPRECATED:
2335# This is the default IPv4 gateway. It is used only in the testing
2336# suite.
2337#
2338# Please note that this option is only used when using nova-network
2339# instead of
2340# Neutron in your deployment.
2341#
2342# Possible values:
2343#
2344# * Any valid IP address.
2345#
2346# Related options:
2347#
2348# * ``use_neutron``
2349# * ``gateway_v6``
2350# (string value)
2351# This option is deprecated for removal since 15.0.0.
2352# Its value may be silently ignored in the future.
2353# Reason:
2354# nova-network is deprecated, as are any related configuration
2355# options.
2356#gateway = <None>
2357
2358# DEPRECATED:
2359# This is the default IPv6 gateway. It is used only in the testing
2360# suite.
2361#
2362# Please note that this option is only used when using nova-network
2363# instead of
2364# Neutron in your deployment.
2365#
2366# Possible values:
2367#
2368# * Any valid IP address.
2369#
2370# Related options:
2371#
2372# * ``use_neutron``
2373# * ``gateway``
2374# (string value)
2375# This option is deprecated for removal since 15.0.0.
2376# Its value may be silently ignored in the future.
2377# Reason:
2378# nova-network is deprecated, as are any related configuration
2379# options.
2380#gateway_v6 = <None>
2381
2382# DEPRECATED:
2383# This option represents the number of IP addresses to reserve at the
2384# top of the
2385# address range for VPN clients. It also will be ignored if the
2386# configuration
2387# option for `network_manager` is not set to the default of
2388# 'nova.network.manager.VlanManager'.
2389#
2390# Possible values:
2391#
2392# * Any integer, 0 or greater.
2393#
2394# Related options:
2395#
2396# * ``use_neutron``
2397# * ``network_manager``
2398# (integer value)
2399# Minimum value: 0
2400# This option is deprecated for removal since 15.0.0.
2401# Its value may be silently ignored in the future.
2402# Reason:
2403# nova-network is deprecated, as are any related configuration
2404# options.
2405#cnt_vpn_clients = 0
2406
2407# DEPRECATED:
2408# This is the number of seconds to wait before disassociating a
2409# deallocated fixed
2410# IP address. This is only used with the nova-network service, and has
2411# no effect
2412# when using neutron for networking.
2413#
2414# Possible values:
2415#
2416# * Any integer, zero or greater.
2417#
2418# Related options:
2419#
2420# * ``use_neutron``
2421# (integer value)
2422# Minimum value: 0
2423# This option is deprecated for removal since 15.0.0.
2424# Its value may be silently ignored in the future.
2425# Reason:
2426# nova-network is deprecated, as are any related configuration
2427# options.
2428#fixed_ip_disassociate_timeout = 600
2429
2430# DEPRECATED:
2431# This option determines how many times nova-network will attempt to
2432# create a
2433# unique MAC address before giving up and raising a
2434# `VirtualInterfaceMacAddressException` error.
2435#
2436# Possible values:
2437#
2438# * Any positive integer. The default is 5.
2439#
2440# Related options:
2441#
2442# * ``use_neutron``
2443# (integer value)
2444# Minimum value: 1
2445# This option is deprecated for removal since 15.0.0.
2446# Its value may be silently ignored in the future.
2447# Reason:
2448# nova-network is deprecated, as are any related configuration
2449# options.
2450#create_unique_mac_address_attempts = 5
2451
2452# DEPRECATED:
2453# Determines whether unused gateway devices, both VLAN and bridge, are
2454# deleted if
2455# the network is in nova-network VLAN mode and is multi-hosted.
2456#
2457# Related options:
2458#
2459# * ``use_neutron``
2460# * ``vpn_ip``
2461# * ``fake_network``
2462# (boolean value)
2463# This option is deprecated for removal since 15.0.0.
2464# Its value may be silently ignored in the future.
2465# Reason:
2466# nova-network is deprecated, as are any related configuration
2467# options.
2468#teardown_unused_network_gateway = false
2469
2470# DEPRECATED:
2471# When this option is True, a call is made to release the DHCP for the
2472# instance
2473# when that instance is terminated.
2474#
2475# Related options:
2476#
2477# * ``use_neutron``
2478# (boolean value)
2479# This option is deprecated for removal since 15.0.0.
2480# Its value may be silently ignored in the future.
2481# Reason:
2482# nova-network is deprecated, as are any related configuration
2483# options.
2484force_dhcp_release = {{ compute.get('force_dhcp_release', 'true') }}
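# The line above is rendered from this template's 'compute' context; with no
# 'force_dhcp_release' key set it produces the default:
#   force_dhcp_release = true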
2485
2486# DEPRECATED:
2487# When this option is True, whenever a DNS entry must be updated, a
2488# fanout cast
2489# message is sent to all network hosts to update their DNS entries in
2490# multi-host
2491# mode.
2492#
2493# Related options:
2494#
2495# * ``use_neutron``
2496# (boolean value)
2497# This option is deprecated for removal since 15.0.0.
2498# Its value may be silently ignored in the future.
2499# Reason:
2500# nova-network is deprecated, as are any related configuration
2501# options.
2502#update_dns_entries = false
2503
2504# DEPRECATED:
2505# This option determines the time, in seconds, to wait between
2506# refreshing DNS
2507# entries for the network.
2508#
2509# Possible values:
2510#
2511# * A positive integer
2512# * -1 to disable updates
2513#
2514# Related options:
2515#
2516# * ``use_neutron``
2517# (integer value)
2518# Minimum value: -1
2519# This option is deprecated for removal since 15.0.0.
2520# Its value may be silently ignored in the future.
2521# Reason:
2522# nova-network is deprecated, as are any related configuration
2523# options.
2524#dns_update_periodic_interval = -1
2525
2526# DEPRECATED:
2527# This option allows you to specify the domain for the DHCP server.
2528#
2529# Possible values:
2530#
2531# * Any string that is a valid domain name.
2532#
2533# Related options:
2534#
2535# * ``use_neutron``
2536# (string value)
2537# This option is deprecated for removal since 15.0.0.
2538# Its value may be silently ignored in the future.
2539# Reason:
2540# nova-network is deprecated, as are any related configuration
2541# options.
2542#dhcp_domain = novalocal
2543dhcp_domain={{ compute.get('dhcp_domain', 'novalocal') }}
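# The line above is rendered from this template's 'compute' context; with no
# 'dhcp_domain' key set it produces the default:
#   dhcp_domain=novalocal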
2544
2545# DEPRECATED:
2546# This option allows you to specify the L3 management library to be
2547# used.
2548#
2549# Possible values:
2550#
2551# * Any dot-separated string that represents the import path to an L3
2552# networking
2553# library.
2554#
2555# Related options:
2556#
2557# * ``use_neutron``
2558# (string value)
2559# This option is deprecated for removal since 15.0.0.
2560# Its value may be silently ignored in the future.
2561# Reason:
2562# nova-network is deprecated, as are any related configuration
2563# options.
2564#l3_lib = nova.network.l3.LinuxNetL3
2565
2566# DEPRECATED:
2567# THIS VALUE SHOULD BE SET WHEN CREATING THE NETWORK.
2568#
2569# If True in multi_host mode, all compute hosts share the same dhcp
2570# address. The
2571# same IP address used for DHCP will be added on each nova-network
2572# node which is
2573# only visible to the VMs on the same host.
2574#
2575# The use of this configuration has been deprecated and may be removed
2576# in any
2577# release after Mitaka. It is recommended that instead of relying on
2578# this option,
2579# an explicit value should be passed to 'create_networks()' as a
2580# keyword argument
2581# with the name 'share_address'.
2582# (boolean value)
2583# This option is deprecated for removal since 2014.2.
2584# Its value may be silently ignored in the future.
2585#share_dhcp_address = false
2586
2587# DEPRECATED:
2588# URL for LDAP server which will store DNS entries
2589#
2590# Possible values:
2591#
2592# * A valid LDAP URL representing the server
2593# (uri value)
2594# This option is deprecated for removal since 16.0.0.
2595# Its value may be silently ignored in the future.
2596# Reason:
2597# nova-network is deprecated, as are any related configuration
2598# options.
2599#ldap_dns_url = ldap://ldap.example.com:389
2600
2601# DEPRECATED: Bind user for LDAP server (string value)
2602# This option is deprecated for removal since 16.0.0.
2603# Its value may be silently ignored in the future.
2604# Reason:
2605# nova-network is deprecated, as are any related configuration
2606# options.
2607#ldap_dns_user = uid=admin,ou=people,dc=example,dc=org
2608
2609# DEPRECATED: Bind user's password for LDAP server (string value)
2610# This option is deprecated for removal since 16.0.0.
2611# Its value may be silently ignored in the future.
2612# Reason:
2613# nova-network is deprecated, as are any related configuration
2614# options.
2615#ldap_dns_password = password
2616
2617# DEPRECATED:
2618# Hostmaster for LDAP DNS driver Statement of Authority
2619#
2620# Possible values:
2621#
2622# * Any valid string representing LDAP DNS hostmaster.
2623# (string value)
2624# This option is deprecated for removal since 16.0.0.
2625# Its value may be silently ignored in the future.
2626# Reason:
2627# nova-network is deprecated, as are any related configuration
2628# options.
2629#ldap_dns_soa_hostmaster = hostmaster@example.org
2630
2631# DEPRECATED:
2632# DNS Servers for LDAP DNS driver
2633#
2634# Possible values:
2635#
2636# * A valid URL representing a DNS server
2637# (multi valued)
2638# This option is deprecated for removal since 16.0.0.
2639# Its value may be silently ignored in the future.
2640# Reason:
2641# nova-network is deprecated, as are any related configuration
2642# options.
2643#ldap_dns_servers = dns.example.org
2644
2645# DEPRECATED:
2646# Base distinguished name for the LDAP search query
2647#
2648# This option helps to decide where to look up the host in LDAP.
2649# (string value)
2650# This option is deprecated for removal since 16.0.0.
2651# Its value may be silently ignored in the future.
2652# Reason:
2653# nova-network is deprecated, as are any related configuration
2654# options.
2655#ldap_dns_base_dn = ou=hosts,dc=example,dc=org
2656
2657# DEPRECATED:
2658# Refresh interval (in seconds) for LDAP DNS driver Start of Authority
2659#
2660# The time interval a secondary/slave DNS server waits before requesting
2661# the
2662# primary DNS server's current SOA record. If the records are
2663# different,
2664# the secondary DNS server will request a zone transfer from the primary.
2665#
2666# NOTE: Lower values would cause more traffic.
2667# (integer value)
2668# This option is deprecated for removal since 16.0.0.
2669# Its value may be silently ignored in the future.
2670# Reason:
2671# nova-network is deprecated, as are any related configuration
2672# options.
2673#ldap_dns_soa_refresh = 1800
2674
2675# DEPRECATED:
2676# Retry interval (in seconds) for LDAP DNS driver Start of Authority
2677#
2678# The time interval a secondary/slave DNS server should wait if an
2679# attempt to transfer the zone failed during the previous refresh
2680# interval.
2681# (integer value)
2682# This option is deprecated for removal since 16.0.0.
2683# Its value may be silently ignored in the future.
2684# Reason:
2685# nova-network is deprecated, as are any related configuration
2686# options.
2687#ldap_dns_soa_retry = 3600
2688
2689# DEPRECATED:
2690# Expiry interval (in seconds) for LDAP DNS driver Start of Authority
2691#
2692# The time interval a secondary/slave DNS server holds the information
2693# before it is no longer considered authoritative.
2694# (integer value)
2695# This option is deprecated for removal since 16.0.0.
2696# Its value may be silently ignored in the future.
2697# Reason:
2698# nova-network is deprecated, as are any related configuration
2699# options.
2700#ldap_dns_soa_expiry = 86400
2701
2702# DEPRECATED:
2703# Minimum interval (in seconds) for LDAP DNS driver Start of Authority
2704#
2705# This is the minimum time-to-live that applies to all resource records in the
2706# zone file. This value tells other servers how long they
2707# should keep the data in cache.
2708# (integer value)
2709# This option is deprecated for removal since 16.0.0.
2710# Its value may be silently ignored in the future.
2711# Reason:
2712# nova-network is deprecated, as are any related configuration
2713# options.
2714#ldap_dns_soa_minimum = 7200
2715
2716# DEPRECATED:
2717# Default value for multi_host in networks.
2718#
2719# nova-network service can operate in a multi-host or single-host
2720# mode.
2721# In multi-host mode each compute node runs a copy of nova-network and
2722# the
2723# instances on that compute node use the compute node as a gateway to
2724# the
2725# Internet. Whereas in single-host mode, a central server runs the
2726# nova-network
2727# service. All compute nodes forward traffic from the instances to the
2728# cloud controller which then forwards traffic to the Internet.
2729#
2730# If this option is set to true, some RPC network calls will be sent
2731# directly
2732# to the host.
2733#
2734# Note that this option is only used when using nova-network instead
2735# of
2736# Neutron in your deployment.
2737#
2738# Related options:
2739#
2740# * ``use_neutron``
2741# (boolean value)
2742# This option is deprecated for removal since 15.0.0.
2743# Its value may be silently ignored in the future.
2744# Reason:
2745# nova-network is deprecated, as are any related configuration
2746# options.
2747#multi_host = false
2748
2749# DEPRECATED:
2750# Driver to use for network creation.
2751#
2752# Network driver initializes (creates bridges and so on) only when the
2753# first VM lands on a host node. All network managers configure the
2754# network using network drivers. The driver is not tied to any
2755# particular
2756# network manager.
2757#
2758# The default Linux driver implements vlans, bridges, and iptables
2759# rules
2760# using linux utilities.
2761#
2762# Note that this option is only used when using nova-network instead
2763# of Neutron in your deployment.
2764#
2765# Related options:
2766#
2767# * ``use_neutron``
2768# (string value)
2769# This option is deprecated for removal since 15.0.0.
2770# Its value may be silently ignored in the future.
2771# Reason:
2772# nova-network is deprecated, as are any related configuration
2773# options.
2774#network_driver = nova.network.linux_net
2775
2776# DEPRECATED:
2777# Firewall driver to use with ``nova-network`` service.
2778#
2779# This option only applies when using the ``nova-network`` service.
2780# When using
2781# another networking services, such as Neutron, this should be to set
2782# to the
2783# ``nova.virt.firewall.NoopFirewallDriver``.
2784#
2785# Possible values:
2786#
2787# * ``nova.virt.firewall.IptablesFirewallDriver``
2788# * ``nova.virt.firewall.NoopFirewallDriver``
2789# * ``nova.virt.libvirt.firewall.IptablesFirewallDriver``
2790# * [...]
2791#
2792# Related options:
2793#
2794# * ``use_neutron``: This must be set to ``False`` to enable ``nova-
2795# network``
2796# networking
2797# (string value)
2798# This option is deprecated for removal since 16.0.0.
2799# Its value may be silently ignored in the future.
2800# Reason:
2801# nova-network is deprecated, as are any related configuration
2802# options.
2803firewall_driver = nova.virt.firewall.NoopFirewallDriver
2804
2805# DEPRECATED:
2806# Determine whether to allow network traffic from same network.
2807#
2808# When set to true, hosts on the same subnet are not filtered and are
2809# allowed
2810# to pass all types of traffic between them. On a flat network, this
2811# allows
2812# unfiltered communication between all instances from all projects. With VLAN
2813# networking, this allows access between instances within the same
2814# project.
2815#
2816# This option only applies when using the ``nova-network`` service.
2817# When using
2818# another networking service, such as Neutron, security groups or
2819# other
2820# approaches should be used.
2821#
2822# Possible values:
2823#
2824# * True: Network traffic should be allowed to pass between all instances
2825# on the
2826# same network, regardless of their tenant and security policies
2827# * False: Network traffic should not be allowed to pass between
2828# instances unless
2829# it is unblocked in a security group
2830#
2831# Related options:
2832#
2833# * ``use_neutron``: This must be set to ``False`` to enable ``nova-
2834# network``
2835# networking
2836# * ``firewall_driver``: This must be set to
2837# ``nova.virt.libvirt.firewall.IptablesFirewallDriver`` to ensure
2838# the
2839# libvirt firewall driver is enabled.
2840# (boolean value)
2841# This option is deprecated for removal since 16.0.0.
2842# Its value may be silently ignored in the future.
2843# Reason:
2844# nova-network is deprecated, as are any related configuration
2845# options.
2846#allow_same_net_traffic = true
2847
2848# DEPRECATED:
2849# Default pool for floating IPs.
2850#
2851# This option specifies the default floating IP pool for allocating
2852# floating IPs.
2853#
2854# While allocating a floating ip, users can optionally pass in the
2855# name of the
2856# pool they want to allocate from; otherwise it will be pulled from
2857# the
2858# default pool.
2859#
2860# If this option is not set, then 'nova' is used as default floating
2861# pool.
2862#
2863# Possible values:
2864#
2865# * Any string representing a floating IP pool name
2866# (string value)
2867# This option is deprecated for removal since 16.0.0.
2868# Its value may be silently ignored in the future.
2869# Reason:
2870# This option was used for two purposes: to set the floating IP pool
2871# name for
2872# nova-network and to do the same for neutron. nova-network is
2873# deprecated, as are
2874# any related configuration options. Users of neutron, meanwhile,
2875# should use the
2876# 'default_floating_pool' option in the '[neutron]' group.
2877#default_floating_pool = nova
2878
2879# DEPRECATED:
2880# Autoassigning floating IP to VM
2881#
2882# When set to True, a floating IP is automatically allocated and associated
2883# with the VM upon creation.
2884#
2885# Related options:
2886#
2887# * use_neutron: this option only works with nova-network.
2888# (boolean value)
2889# This option is deprecated for removal since 15.0.0.
2890# Its value may be silently ignored in the future.
2891# Reason:
2892# nova-network is deprecated, as are any related configuration
2893# options.
2894#auto_assign_floating_ip = false
2895
2896# DEPRECATED:
2897# Full class name for the DNS Manager for floating IPs.
2898#
2899# This option specifies the class of the driver that provides
2900# functionality
2901# to manage DNS entries associated with floating IPs.
2902#
2903# When a user adds a DNS entry for a specified domain to a floating
2904# IP,
2905# nova will add a DNS entry using the specified floating DNS driver.
2906# When a floating IP is deallocated, its DNS entry will automatically
2907# be deleted.
2908#
2909# Possible values:
2910#
2911# * Full Python path to the class to be used
2912#
2913# Related options:
2914#
2915# * use_neutron: this option only works with nova-network.
2916# (string value)
2917# This option is deprecated for removal since 15.0.0.
2918# Its value may be silently ignored in the future.
2919# Reason:
2920# nova-network is deprecated, as are any related configuration
2921# options.
2922#floating_ip_dns_manager = nova.network.noop_dns_driver.NoopDNSDriver
2923
2924# DEPRECATED:
2925# Full class name for the DNS Manager for instance IPs.
2926#
2927# This option specifies the class of the driver that provides
2928# functionality
2929# to manage DNS entries for instances.
2930#
2931# On instance creation, nova will add DNS entries for the instance
2932# name and
2933# id, using the specified instance DNS driver and domain. On instance
2934# deletion,
2935# nova will remove the DNS entries.
2936#
2937# Possible values:
2938#
2939# * Full Python path to the class to be used
2940#
2941# Related options:
2942#
2943# * use_neutron: this option only works with nova-network.
2944# (string value)
2945# This option is deprecated for removal since 15.0.0.
2946# Its value may be silently ignored in the future.
2947# Reason:
2948# nova-network is deprecated, as are any related configuration
2949# options.
2950#instance_dns_manager = nova.network.noop_dns_driver.NoopDNSDriver
2951
2952# DEPRECATED:
2953# If specified, Nova checks if the availability_zone of every instance
2954# matches
2955# what the database says the availability_zone should be for the
2956# specified
2957# dns_domain.
2958#
2959# Related options:
2960#
2961# * use_neutron: this option only works with nova-network.
2962# (string value)
2963# This option is deprecated for removal since 15.0.0.
2964# Its value may be silently ignored in the future.
2965# Reason:
2966# nova-network is deprecated, as are any related configuration
2967# options.
2968#instance_dns_domain =
2969
2970# DEPRECATED:
2971# Assign IPv6 and IPv4 addresses when creating instances.
2972#
2973# Related options:
2974#
2975# * use_neutron: this only works with nova-network.
2976# (boolean value)
2977# This option is deprecated for removal since 16.0.0.
2978# Its value may be silently ignored in the future.
2979# Reason:
2980# nova-network is deprecated, as are any related configuration
2981# options.
2982#use_ipv6 = false
2983
2984# DEPRECATED:
2985# Abstracts out IPv6 address generation to pluggable backends.
2986#
2987# nova-network can be put into dual-stack mode, so that it uses
2988# both IPv4 and IPv6 addresses. In dual-stack mode, by default,
2989# instances
2990# acquire IPv6 global unicast addresses with the help of stateless
2991# address
2992# auto-configuration mechanism.
2993#
2994# Related options:
2995#
2996# * use_neutron: this option only works with nova-network.
2997# * use_ipv6: this option only works if ipv6 is enabled for nova-
2998# network.
2999# (string value)
3000# Possible values:
3001# rfc2462 - <No description provided>
3002# account_identifier - <No description provided>
3003# This option is deprecated for removal since 16.0.0.
3004# Its value may be silently ignored in the future.
3005# Reason:
3006# nova-network is deprecated, as are any related configuration
3007# options.
3008#ipv6_backend = rfc2462
3009
3010# DEPRECATED:
3011# This option is used to enable or disable quota checking for tenant
3012# networks.
3013#
3014# Related options:
3015#
3016# * quota_networks
3017# (boolean value)
3018# This option is deprecated for removal since 14.0.0.
3019# Its value may be silently ignored in the future.
3020# Reason:
3021# CRUD operations on tenant networks are only available when using
3022# nova-network
3023# and nova-network is itself deprecated.
3024#enable_network_quota = false
3025
3026# DEPRECATED:
3027# This option controls the number of private networks that can be
3028# created per
3029# project (or per tenant).
3030#
3031# Related options:
3032#
3033# * enable_network_quota
3034# (integer value)
3035# Minimum value: 0
3036# This option is deprecated for removal since 14.0.0.
3037# Its value may be silently ignored in the future.
3038# Reason:
3039# CRUD operations on tenant networks are only available when using
3040# nova-network
3041# and nova-network is itself deprecated.
3042#quota_networks = 3
3043
3044#
3045# Filename that will be used for storing websocket frames received
3046# and sent by a proxy service (like VNC, spice, serial) running on
3047# this host.
3048# If this is not set, no recording will be done.
3049# (string value)
3050#record = <None>
3051
3052# Run as a background process. (boolean value)
3053#daemon = false
3054
3055# Disallow non-encrypted connections. (boolean value)
3056#ssl_only = false
3057
3058# Set to True if source host is addressed with IPv6. (boolean value)
3059#source_is_ipv6 = false
3060
3061# Path to SSL certificate file. (string value)
3062#cert = self.pem
3063
3064# SSL key file (if separate from cert). (string value)
3065#key = <None>
3066
3067#
3068# Path to directory with content which will be served by a web server.
3069# (string value)
3070#web = /usr/share/spice-html5
3071
3072#
3073# The directory where the Nova python modules are installed.
3074#
3075# This directory is used to store template files for networking and
3076# remote
3077# console access. It is also the default path for other config options
3078# which
3079# need to persist Nova internal data. It is very unlikely that you
3080# need to
3081# change this option from its default value.
3082#
3083# Possible values:
3084#
3085# * The full path to a directory.
3086#
3087# Related options:
3088#
3089# * ``state_path``
3090# (string value)
3091#pybasedir = /usr/lib/python2.7/dist-packages
3092
3093#
3094# The directory where the Nova binaries are installed.
3095#
3096# This option is only relevant if the networking capabilities from
3097# Nova are
3098# used (see services below). Nova's networking capabilities are
3099# targeted to
3100# be fully replaced by Neutron in the future. It is very unlikely that
3101# you need
3102# to change this option from its default value.
3103#
3104# Possible values:
3105#
3106# * The full path to a directory.
3107# (string value)
3108#bindir = /usr/local/bin
3109
3110#
3111# The top-level directory for maintaining Nova's state.
3112#
3113# This directory is used to store Nova's internal state. It is used by
3114# a
3115# variety of other config options which derive from this. In some
3116# scenarios
3117# (for example migrations) it makes sense to use a storage location
3118# which is
3119# shared between multiple compute hosts (for example via NFS). Unless
3120# the
3121# option ``instances_path`` gets overwritten, this directory can grow
3122# very
3123# large.
3124#
3125# Possible values:
3126#
3127# * The full path to a directory. Defaults to value provided in
3128# ``pybasedir``.
3129# (string value)
3130state_path = /var/lib/nova
3131
3132#
3133# Number of seconds indicating how frequently the state of services on
3134# a
3135# given hypervisor is reported. Nova needs to know this to determine
3136# the
3137# overall health of the deployment.
3138#
3139# Related Options:
3140#
3141# * service_down_time
3142# report_interval should be less than service_down_time. If
3143# service_down_time
3144# is less than report_interval, services will routinely be
3145# considered down,
3146# because they report in too rarely.
3147# (integer value)
3148#report_interval = 10
3149report_interval = {{ compute.get('report_interval', '60') }}
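# The line above is rendered from this template's 'compute' context; with no
# 'report_interval' key set it produces:
#   report_interval = 60
# Keep this value below service_down_time (90 below) so that services are not
# routinely considered down between check-ins.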
3150
3151#
3152# Maximum time in seconds since last check-in for up service
3153#
3154# Each compute node periodically updates its database status based
3155# on the
3156# specified report interval. If the compute node hasn't updated the
3157# status
3158# for more than service_down_time, then the compute node is considered
3159# down.
3160#
3161# Related Options:
3162#
3163# * report_interval (service_down_time should not be less than
3164# report_interval)
3165# (integer value)
3166service_down_time = 90
3167
3168#
3169# Enable periodic tasks.
3170#
3171# If set to true, this option allows services to periodically run
3172# tasks
3173# on the manager.
3174#
3175# In case of running multiple schedulers or conductors you may want to
3176# run
3177# periodic tasks on only one host - in this case disable this option
3178# for all
3179# hosts but one.
3180# (boolean value)
3181#periodic_enable = true
3182
3183#
3184# Number of seconds to randomly delay when starting the periodic task
3185# scheduler to reduce stampeding.
3186#
3187# When compute workers are restarted in unison across a cluster,
3188# they all end up running the periodic tasks at the same time
3189# causing problems for the external services. To mitigate this
3190# behavior, periodic_fuzzy_delay option allows you to introduce a
3191# random initial delay when starting the periodic task scheduler.
3192#
3193# Possible Values:
3194#
3195# * Any positive integer (in seconds)
3196# * 0 : disable the random delay
3197# (integer value)
3198# Minimum value: 0
3199#periodic_fuzzy_delay = 60
3200
3201# List of APIs to be enabled by default. (list value)
3202enabled_apis = osapi_compute,metadata
3203
3204#
3205# List of APIs with enabled SSL.
3206#
3207# Nova provides SSL support for the API servers. enabled_ssl_apis
3208# option
3209# allows configuring the SSL support.
3210# (list value)
3211#enabled_ssl_apis =
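# Illustrative example only (not a default): to enable SSL for the compute
# API listed in enabled_apis above, you could set for instance:
#enabled_ssl_apis = osapi_compute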
3212
3213#
3214# IP address on which the OpenStack API will listen.
3215#
3216# The OpenStack API service listens on this IP address for incoming
3217# requests.
3218# (string value)
3219#osapi_compute_listen = 0.0.0.0
3220
3221#
3222# Port on which the OpenStack API will listen.
3223#
3224# The OpenStack API service listens on this port number for incoming
3225# requests.
3226# (port value)
3227# Minimum value: 0
3228# Maximum value: 65535
3229#osapi_compute_listen_port = 8774
3230
3231#
3232# Number of workers for OpenStack API service. The default will be the
3233# number
3234# of CPUs available.
3235#
3236# OpenStack API services can be configured to run as multi-process
3237# (workers).
3238# This overcomes the problem of reduction in throughput when API
3239# request
3240# concurrency increases. OpenStack API service will run in the
3241# specified
3242# number of processes.
3243#
3244# Possible Values:
3245#
3246# * Any positive integer
3247# * None (default value)
3248# (integer value)
3249# Minimum value: 1
3250#osapi_compute_workers = <None>
3251
3252#
3253# IP address on which the metadata API will listen.
3254#
3255# The metadata API service listens on this IP address for incoming
3256# requests.
3257# (string value)
3258#metadata_listen = 0.0.0.0
3259
3260#
3261# Port on which the metadata API will listen.
3262#
3263# The metadata API service listens on this port number for incoming
3264# requests.
3265# (port value)
3266# Minimum value: 0
3267# Maximum value: 65535
3268#metadata_listen_port = 8775
3269
3270#
3271# Number of workers for the metadata service. If not specified, the number
3272# of
3273# available CPUs will be used.
3274#
3275# The metadata service can be configured to run as multi-process
3276# (workers).
3277# This overcomes the problem of reduction in throughput when API
3278# request
3279# concurrency increases. The metadata service will run in the
3280# specified
3281# number of processes.
3282#
3283# Possible Values:
3284#
3285# * Any positive integer
3286# * None (default value)
3287# (integer value)
3288# Minimum value: 1
3289#metadata_workers = <None>
3290
3291# Full class name for the Manager for network (string value)
3292# Possible values:
3293# nova.network.manager.FlatManager - <No description provided>
3294# nova.network.manager.FlatDHCPManager - <No description provided>
3295# nova.network.manager.VlanManager - <No description provided>
3296#network_manager = nova.network.manager.VlanManager
3297
3298#
3299# This option specifies the driver to be used for the servicegroup
3300# service.
3301#
3302# ServiceGroup API in nova enables checking status of a compute node.
3303# When a
3304# compute worker running the nova-compute daemon starts, it calls the
3305# join API
3306# to join the compute group. Services like nova scheduler can query
3307# the
3308# ServiceGroup API to check if a node is alive. Internally, the
3309# ServiceGroup
3310# client driver automatically updates the compute worker status. There
3311# are
3312# multiple backend implementations for this service: Database
3313# ServiceGroup driver
3314# and Memcache ServiceGroup driver.
3315#
3316# Possible Values:
3317#
3318# * db : Database ServiceGroup driver
3319# * mc : Memcache ServiceGroup driver
3320#
3321# Related Options:
3322#
3323# * service_down_time (maximum time since last check-in for up
3324# service)
3325# (string value)
3326# Possible values:
3327# db - <No description provided>
3328# mc - <No description provided>
3329#servicegroup_driver = db
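# Illustrative example only (not the default): to use the Memcache
# ServiceGroup driver described above instead of the database driver:
#servicegroup_driver = mc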
3330
3331#
3332# From oslo.service.periodic_task
3333#
3334
3335# Some periodic tasks can be run in a separate process. Should we run
3336# them here? (boolean value)
3337#run_external_periodic_tasks = true
3338
3339#
3340# From oslo.service.service
3341#
3342
3343# Enable eventlet backdoor. Acceptable values are 0, <port>, and
3344# <start>:<end>, where 0 results in listening on a random tcp port
3345# number; <port> results in listening on the specified port number
3346# (and not enabling backdoor if that port is in use); and
3347# <start>:<end> results in listening on the smallest unused port
3348# number within the specified range of port numbers. The chosen port
3349# is displayed in the service's log file. (string value)
3350#backdoor_port = <None>
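# Illustrative examples only, following the formats described above:
#backdoor_port = 0            # listen on a random free TCP port
#backdoor_port = 8001:8010    # listen on the smallest unused port in this range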
3351
3352# Enable eventlet backdoor, using the provided path as a unix socket
3353# that can receive connections. This option is mutually exclusive with
3354# 'backdoor_port' in that only one should be provided. If both are
3355# provided then the existence of this option overrides the usage of
3356# that option. (string value)
3357#backdoor_socket = <None>
3358
3359# Enables or disables logging values of all registered options when
3360# starting a service (at DEBUG level). (boolean value)
3361#log_options = true
3362
3363# Specify a timeout after which a gracefully shutdown server will
3364# exit. Zero value means endless wait. (integer value)
3365#graceful_shutdown_timeout = 60
3366
3367{%- if compute.logging is defined %}
3368{%- set _data = compute.logging %}
3369{%- include "oslo_templates/files/queens/oslo/_log.conf" %}
3370{%- endif %}
3371
3372{%- set _data = compute.message_queue %}
3373{%- include "oslo_templates/files/queens/oslo/messaging/_default.conf" %}
3374
3375[api]
3376#
3377# Options under this group are used to define Nova API.
3378
3379#
3380# From nova.conf
3381#
3382
3383#
3384# This determines the strategy to use for authentication: keystone or
3385# noauth2.
3386# 'noauth2' is designed for testing only, as it does no actual
3387# credential
3388# checking. 'noauth2' provides administrative credentials only if
3389# 'admin' is
3390# specified as the username.
3391# (string value)
3392# Possible values:
3393# keystone - <No description provided>
3394# noauth2 - <No description provided>
3395auth_strategy = keystone
3396
3397#
3398# When True, the 'X-Forwarded-For' header is treated as the canonical
3399# remote
3400# address. When False (the default), the 'remote_address' header is
3401# used.
3402#
3403# You should only enable this if you have an HTML sanitizing proxy.
3404# (boolean value)
3405#use_forwarded_for = false
3406
3407#
3408# When gathering the existing metadata for a config drive, the
3409# EC2-style
3410# metadata is returned for all versions that don't appear in this
3411# option.
3412# As of the Liberty release, the available versions are:
3413#
3414# * 1.0
3415# * 2007-01-19
3416# * 2007-03-01
3417# * 2007-08-29
3418# * 2007-10-10
3419# * 2007-12-15
3420# * 2008-02-01
3421# * 2008-09-01
3422# * 2009-04-04
3423#
3424# The option is in the format of a single string, with each version
3425# separated
3426# by a space.
3427#
3428# Possible values:
3429#
3430# * Any string that represents zero or more versions, separated by
3431# spaces.
3432# (string value)
3433#config_drive_skip_versions = 1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01
3434
3435#
3436# A list of vendordata providers.
3437#
3438# vendordata providers are how deployers can provide metadata via
3439# configdrive
3440# and metadata that is specific to their deployment. There are
3441# currently two
3442# supported providers: StaticJSON and DynamicJSON.
3443#
3444# StaticJSON reads a JSON file configured by the flag
3445# vendordata_jsonfile_path
3446# and places the JSON from that file into vendor_data.json and
3447# vendor_data2.json.
3448#
3449# DynamicJSON is configured via the vendordata_dynamic_targets flag,
3450# which is
3451# documented separately. For each of the endpoints specified in that
3452# flag, a
3453# section is added to the vendor_data2.json.
3454#
3455# For more information on the requirements for implementing a
3456# vendordata
3457# dynamic endpoint, please see the vendordata.rst file in the nova
3458# developer
3459# reference.
3460#
3461# Possible values:
3462#
3463# * A list of vendordata providers, with StaticJSON and DynamicJSON
3464# being
3465# current options.
3466#
3467# Related options:
3468#
3469# * vendordata_dynamic_targets
3470# * vendordata_dynamic_ssl_certfile
3471# * vendordata_dynamic_connect_timeout
3472# * vendordata_dynamic_read_timeout
3473# * vendordata_dynamic_failure_fatal
3474# (list value)
3475#vendordata_providers = StaticJSON
3476
3477#
3478# A list of targets for the dynamic vendordata provider. These targets
3479# are of
3480# the form <name>@<url>.
3481#
3482# The dynamic vendordata provider collects metadata by contacting
3483# external REST
3484# services and querying them for information about the instance. This
3485# behaviour
3486# is documented in the vendordata.rst file in the nova developer
3487# reference.
3488# (list value)
3489#vendordata_dynamic_targets =
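#
# Example (illustrative only; the target name and URL are placeholders,
# following the <name>@<url> form described above):
# vendordata_dynamic_targets = example@http://127.0.0.1:9312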
3490
3491#
3492# Path to an optional certificate file or CA bundle to verify dynamic
3493# vendordata REST services ssl certificates against.
3494#
3495# Possible values:
3496#
3497# * An empty string, or a path to a valid certificate file
3498#
3499# Related options:
3500#
3501# * vendordata_providers
3502# * vendordata_dynamic_targets
3503# * vendordata_dynamic_connect_timeout
3504# * vendordata_dynamic_read_timeout
3505# * vendordata_dynamic_failure_fatal
3506# (string value)
3507#vendordata_dynamic_ssl_certfile =
3508
3509#
3510# Maximum wait time for an external REST service to connect.
3511#
3512# Possible values:
3513#
3514# * Any integer with a value greater than three (the TCP packet
3515# retransmission
3516# timeout). Note that instance start may be blocked during this wait
3517# time,
3518# so this value should be kept small.
3519#
3520# Related options:
3521#
3522# * vendordata_providers
3523# * vendordata_dynamic_targets
3524# * vendordata_dynamic_ssl_certfile
3525# * vendordata_dynamic_read_timeout
3526# * vendordata_dynamic_failure_fatal
3527# (integer value)
3528# Minimum value: 3
3529#vendordata_dynamic_connect_timeout = 5
3530
3531#
3532# Maximum wait time for an external REST service to return data once
3533# connected.
3534#
3535# Possible values:
3536#
3537# * Any integer. Note that instance start is blocked during this wait
3538# time,
3539# so this value should be kept small.
3540#
3541# Related options:
3542#
3543# * vendordata_providers
3544# * vendordata_dynamic_targets
3545# * vendordata_dynamic_ssl_certfile
3546# * vendordata_dynamic_connect_timeout
3547# * vendordata_dynamic_failure_fatal
3548# (integer value)
3549# Minimum value: 0
3550#vendordata_dynamic_read_timeout = 5
3551
3552#
3553# Should failures to fetch dynamic vendordata be fatal to instance
3554# boot?
3555#
3556# Related options:
3557#
3558# * vendordata_providers
3559# * vendordata_dynamic_targets
3560# * vendordata_dynamic_ssl_certfile
3561# * vendordata_dynamic_connect_timeout
3562# * vendordata_dynamic_read_timeout
3563# (boolean value)
3564#vendordata_dynamic_failure_fatal = false
3565
3566#
3567# This option is the time (in seconds) to cache metadata. When set to
3568# 0,
3569# metadata caching is disabled entirely; this is generally not
3570# recommended for
3571# performance reasons. Increasing this setting should improve response
3572# times
3573# of the metadata API when under heavy load. Higher values may
3574# increase memory
3575# usage, and result in longer times for host metadata changes to take
3576# effect.
3577# (integer value)
3578# Minimum value: 0
3579#metadata_cache_expiration = 15
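#
# Example (illustrative only): a longer cache lifetime eases load on a busy
# metadata API at the cost of slower propagation of metadata changes.
# metadata_cache_expiration = 60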
3580
3581#
3582# Cloud providers may store custom data in vendor data file that will
3583# then be
3584# available to the instances via the metadata service, and to the
3585# rendering of
3586# config-drive. The default class for this, JsonFileVendorData, loads
3587# this
3588# information from a JSON file, whose path is configured by this
3589# option. If
3590# there is no path set by this option, the class returns an empty
3591# dictionary.
3592#
3593# Possible values:
3594#
3595# * Any string representing the path to the data file, or an empty
3596# string
3597# (default).
3598# (string value)
3599#vendordata_jsonfile_path = <None>
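#
# Example (illustrative only; the path and file contents are placeholders):
# vendordata_jsonfile_path = /etc/nova/vendor_data.json
# where that file holds arbitrary JSON, e.g. {"support": "support@example.com"}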
3600
3601#
3602# As a query can potentially return many thousands of items, you can
3603# limit the
3604# maximum number of items in a single response by setting this option.
3605# (integer value)
3606# Minimum value: 0
3607# Deprecated group/name - [DEFAULT]/osapi_max_limit
3608#max_limit = 1000
3609
3610#
3611# This string is prepended to the normal URL that is returned in links
3612# to the
3613# OpenStack Compute API. If it is empty (the default), the URLs are
3614# returned
3615# unchanged.
3616#
3617# Possible values:
3618#
3619# * Any string, including an empty string (the default).
3620# (string value)
3621# Deprecated group/name - [DEFAULT]/osapi_compute_link_prefix
3622#compute_link_prefix = <None>
3623
3624#
3625# This string is prepended to the normal URL that is returned in links
3626# to
3627# Glance resources. If it is empty (the default), the URLs are
3628# returned
3629# unchanged.
3630#
3631# Possible values:
3632#
3633# * Any string, including an empty string (the default).
3634# (string value)
3635# Deprecated group/name - [DEFAULT]/osapi_glance_link_prefix
3636#glance_link_prefix = <None>
3637
3638# DEPRECATED:
3639# Operators can turn off the ability for a user to take snapshots of
3640# their
3641# instances by setting this option to False. When disabled, any
3642# attempt to
3643# take a snapshot will result in an HTTP 400 response ("Bad Request").
3644# (boolean value)
3645# This option is deprecated for removal since 16.0.0.
3646# Its value may be silently ignored in the future.
3647# Reason: This option disables the createImage server action API in a
3648# non-discoverable way and is thus a barrier to interoperability.
3649# Also, it is not used for other APIs that create snapshots like
3650# shelve or createBackup. Disabling snapshots should be done via
3651# policy if so desired.
3652#allow_instance_snapshots = true
3653
3654# DEPRECATED:
3655# This option is a list of all instance states for which network
3656# address
3657# information should not be returned from the API.
3658#
3659# Possible values:
3660#
3661# A list of strings, where each string is a valid VM state, as
3662# defined in
3663# nova/compute/vm_states.py. As of the Newton release, they are:
3664#
3665# * "active"
3666# * "building"
3667# * "paused"
3668# * "suspended"
3669# * "stopped"
3670# * "rescued"
3671# * "resized"
3672# * "soft-delete"
3673# * "deleted"
3674# * "error"
3675# * "shelved"
3676# * "shelved_offloaded"
3677# (list value)
3678# Deprecated group/name - [DEFAULT]/osapi_hide_server_address_states
3679# This option is deprecated for removal since 17.0.0.
3680# Its value may be silently ignored in the future.
3681# Reason: This option hides the server address in the server representation
3682# for the configured server states, which makes the GET server API behavior
3683# controlled by this config option. Because of this, users would not
3684# be able to discover the API behavior on different clouds, which leads
3685# to interoperability issues.
3686#hide_server_address_states = building
3687
3688# The full path to the fping binary. (string value)
3689#fping_path = /usr/sbin/fping
3690
3691#
3692# When True, the TenantNetworkController will query the Neutron API to
3693# get the
3694# default networks to use.
3695#
3696# Related options:
3697#
3698# * neutron_default_tenant_id
3699# (boolean value)
3700#use_neutron_default_nets = false
3701
3702#
3703# Tenant ID for getting the default network from Neutron API (also
3704# referred to in
3705# some places as the 'project ID') to use.
3706#
3707# Related options:
3708#
3709# * use_neutron_default_nets
3710# (string value)
3711#neutron_default_tenant_id = default
3712
3713#
3714# Enables returning of the instance password by the relevant server
3715# API calls
3716# such as create, rebuild, evacuate, or rescue. If the hypervisor does
3717# not
3718# support password injection, then the password returned will not be
3719# correct,
3720# so if your hypervisor does not support password injection, set this
3721# to False.
3722# (boolean value)
3723#enable_instance_password = true
3724
3725
3726[api_database]
3727{%- set _data = {} %}
3728{%- do _data.update(compute.database) %}
3729{%- do _data.update({'name': 'nova_api'}) %}
3730{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': compute.cacert_file}) %}{% endif %}
3731{%- include "oslo_templates/files/queens/oslo/_database.conf" %}
3732
3733{%- if compute.get('barbican', {}).get('enabled', False) %}
3734{%- set _data = compute.identity %}
3735[barbican]
3736{%- include "oslo_templates/files/queens/castellan/_barbican.conf" %}
3737{%- endif %}
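# A minimal pillar sketch (illustrative only) that renders the optional
# [barbican] section above; credentials are taken from the identity settings:
#   nova:
#     compute:
#       barbican:
#         enabled: true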
3738
3739[cache]
3740
3741{%- if compute.cache is defined %}
3742{%- set _data = compute.cache %}
3743{%- include "oslo_templates/files/queens/oslo/_cache.conf" %}
3744{%- endif %}
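# The [cache] include above is rendered only when a ``cache`` dictionary is
# defined under the compute pillar; the nested keys below are illustrative
# assumptions only:
#   nova:
#     compute:
#       cache:
#         engine: memcached
#         members:
#         - host: 127.0.0.1
#           port: 11211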
3745
3746[cells]
3747#
3748# DEPRECATED: Cells options allow you to use cells v1 functionality in
3749# an
3750# OpenStack deployment.
3751#
3752# Note that the options in this group are only for cells v1
3753# functionality, which
3754# is considered experimental and not recommended for new deployments.
3755# Cells v1
3756# is being replaced with cells v2, which starting in the 15.0.0 Ocata
3757# release is
3758# required and all Nova deployments will be at least a cells v2 cell
3759# of one.
3760#
3761
3762#
3763# From nova.conf
3764#
3765
3766# DEPRECATED:
3767# Enable cell v1 functionality.
3768#
3769# Note that cells v1 is considered experimental and not recommended
3770# for new
3771# Nova deployments. Cells v1 is being replaced by cells v2 which
3772# starting in
3773# the 15.0.0 Ocata release, all Nova deployments are at least a cells
3774# v2 cell
3775# of one. Setting this option, or any other options in the [cells]
3776# group, is
3777# not required for cells v2.
3778#
3779# When this functionality is enabled, it lets you scale an
3780# OpenStack
3781# Compute cloud in a more distributed fashion without having to use
3782# complicated technologies like database and message queue clustering.
3783# Cells are configured as a tree. The top-level cell should have a
3784# host
3785# that runs a nova-api service, but no nova-compute services. Each
3786# child cell should run all of the typical nova-* services in a
3787# regular
3788# Compute cloud except for nova-api. You can think of cells as a
3789# normal
3790# Compute deployment in that each cell has its own database server and
3791# message queue broker.
3792#
3793# Related options:
3794#
3795# * name: A unique cell name must be given when this functionality
3796# is enabled.
3797# * cell_type: Cell type should be defined for all cells.
3798# (boolean value)
3799# This option is deprecated for removal since 16.0.0.
3800# Its value may be silently ignored in the future.
3801# Reason: Cells v1 is being replaced with Cells v2.
3802#enable = false
3803
3804# DEPRECATED:
3805# Name of the current cell.
3806#
3807# This value must be unique for each cell. Name of a cell is used as
3808# its id, leaving this option unset or setting the same name for
3809# two or more cells may cause unexpected behaviour.
3810#
3811# Related options:
3812#
3813# * enabled: This option is meaningful only when cells service
3814# is enabled
3815# (string value)
3816# This option is deprecated for removal since 16.0.0.
3817# Its value may be silently ignored in the future.
3818# Reason: Cells v1 is being replaced with Cells v2.
3819#name = nova
3820
3821# DEPRECATED:
3822# Cell capabilities.
3823#
3824# List of arbitrary key=value pairs defining capabilities of the
3825# current cell to be sent to the parent cells. These capabilities
3826# are intended to be used in cells scheduler filters/weighers.
3827#
3828# Possible values:
3829#
3830# * key=value pairs list for example;
3831# ``hypervisor=xenserver;kvm,os=linux;windows``
3832# (list value)
3833# This option is deprecated for removal since 16.0.0.
3834# Its value may be silently ignored in the future.
3835# Reason: Cells v1 is being replaced with Cells v2.
3836#capabilities = hypervisor=xenserver;kvm,os=linux;windows
3837
3838# DEPRECATED:
3839# Call timeout.
3840#
3841# Cell messaging module waits for response(s) to be put into the
3842# eventlet queue. This option defines the seconds waited for
3843# response from a call to a cell.
3844#
3845# Possible values:
3846#
3847# * An integer, corresponding to the interval time in seconds.
3848# (integer value)
3849# Minimum value: 0
3850# This option is deprecated for removal since 16.0.0.
3851# Its value may be silently ignored in the future.
3852# Reason: Cells v1 is being replaced with Cells v2.
3853#call_timeout = 60
3854
3855# DEPRECATED:
3856# Reserve percentage
3857#
3858# Percentage of cell capacity to hold in reserve, so the minimum
3859# amount of free resource is considered to be;
3860#
3861# min_free = total * (reserve_percent / 100.0)
3862#
3863# This option affects both memory and disk utilization.
3864#
3865# The primary purpose of this reserve is to ensure some space is
3866# available for users who want to resize their instance to be larger.
3867# Note that currently once the capacity expands into this reserve
3868# space this option is ignored.
3869#
3870# Possible values:
3871#
3872# * An integer or float, corresponding to the percentage of cell
3873# capacity to
3874# be held in reserve.
3875# (floating point value)
3876# This option is deprecated for removal since 16.0.0.
3877# Its value may be silently ignored in the future.
3878# Reason: Cells v1 is being replaced with Cells v2.
3879#reserve_percent = 10.0
3880
3881# DEPRECATED:
3882# Type of cell.
3883#
3884# When cells feature is enabled the hosts in the OpenStack Compute
3885# cloud are partitioned into groups. Cells are configured as a tree.
3886# The top-level cell's cell_type must be set to ``api``. All other
3887# cells are defined as a ``compute cell`` by default.
3888#
3889# Related option:
3890#
3891# * quota_driver: Disable quota checking for the child cells.
3892# (nova.quota.NoopQuotaDriver)
3893# (string value)
3894# Possible values:
3895# api - <No description provided>
3896# compute - <No description provided>
3897# This option is deprecated for removal since 16.0.0.
3898# Its value may be silently ignored in the future.
3899# Reason: Cells v1 is being replaced with Cells v2.
3900#cell_type = compute
3901
3902# DEPRECATED:
3903# Mute child interval.
3904#
3905# Number of seconds after which a child cell that has not sent a
3906# capability and capacity update is treated as a mute cell. The mute
3907# child cell is then weighed so that it is strongly recommended to be skipped.
3908#
3909# Possible values:
3910#
3911# * An integer, corresponding to the interval time in seconds.
3912# (integer value)
3913# This option is deprecated for removal since 16.0.0.
3914# Its value may be silently ignored in the future.
3915# Reason: Cells v1 is being replaced with Cells v2.
3916#mute_child_interval = 300
3917
3918# DEPRECATED:
3919# Bandwidth update interval.
3920#
3921# Seconds between bandwidth usage cache updates for cells.
3922#
3923# Possible values:
3924#
3925# * An integer, corresponding to the interval time in seconds.
3926# (integer value)
3927# This option is deprecated for removal since 16.0.0.
3928# Its value may be silently ignored in the future.
3929# Reason: Cells v1 is being replaced with Cells v2.
3930#bandwidth_update_interval = 600
3931
3932# DEPRECATED:
3933# Instance update sync database limit.
3934#
3935# Number of instances to pull from the database at one time for
3936# a sync. If there are more instances to update the results will
3937# be paged through.
3938#
3939# Possible values:
3940#
3941# * An integer, corresponding to a number of instances.
3942# (integer value)
3943# This option is deprecated for removal since 16.0.0.
3944# Its value may be silently ignored in the future.
3945# Reason: Cells v1 is being replaced with Cells v2.
3946#instance_update_sync_database_limit = 100
3947
3948# DEPRECATED:
3949# Mute weight multiplier.
3950#
3951# Multiplier used to weigh mute children. Mute children cells are
3952# recommended to be skipped so their weight is multiplied by this
3953# negative value.
3954#
3955# Possible values:
3956#
3957# * Negative numeric number
3958# (floating point value)
3959# This option is deprecated for removal since 16.0.0.
3960# Its value may be silently ignored in the future.
3961# Reason: Cells v1 is being replaced with Cells v2.
3962#mute_weight_multiplier = -10000.0
3963
3964# DEPRECATED:
3965# Ram weight multiplier.
3966#
3967# Multiplier used for weighing ram. Negative numbers indicate that
3968# Compute should stack VMs on one host instead of spreading out new
3969# VMs to more hosts in the cell.
3970#
3971# Possible values:
3972#
3973# * Numeric multiplier
3974# (floating point value)
3975# This option is deprecated for removal since 16.0.0.
3976# Its value may be silently ignored in the future.
3977# Reason: Cells v1 is being replaced with Cells v2.
3978#ram_weight_multiplier = 10.0
3979
3980# DEPRECATED:
3981# Offset weight multiplier
3982#
3983# Multiplier used to weigh offset weigher. Cells with higher
3984# weight_offsets in the DB will be preferred. The weight_offset
3985# is a property of a cell stored in the database. It can be used
3986# by a deployer to have scheduling decisions favor or disfavor
3987# cells based on the setting.
3988#
3989# Possible values:
3990#
3991# * Numeric multiplier
3992# (floating point value)
3993# This option is deprecated for removal since 16.0.0.
3994# Its value may be silently ignored in the future.
3995# Reason: Cells v1 is being replaced with Cells v2.
3996#offset_weight_multiplier = 1.0
3997
3998# DEPRECATED:
3999# Instance updated at threshold
4000#
4001# Number of seconds after an instance was updated or deleted to
4002# continue to update cells. This option lets the cells manager only
4003# attempt to sync instances that have been updated recently,
4004# i.e., a threshold of 3600 means to only update instances that
4005# have been modified in the last hour.
4006#
4007# Possible values:
4008#
4009# * Threshold in seconds
4010#
4011# Related options:
4012#
4013# * This value is used with the ``instance_update_num_instances``
4014# value in a periodic task run.
4015# (integer value)
4016# This option is deprecated for removal since 16.0.0.
4017# Its value may be silently ignored in the future.
4018# Reason: Cells v1 is being replaced with Cells v2.
4019#instance_updated_at_threshold = 3600
4020
4021# DEPRECATED:
4022# Instance update num instances
4023#
4024# On every run of the periodic task, nova cells manager will attempt
4025# to
4026# sync instance_updated_at_threshold number of instances. When the
4027# manager gets the list of instances, it shuffles them so that
4028# multiple
4029# nova-cells services do not attempt to sync the same instances in
4030# lockstep.
4031#
4032# Possible values:
4033#
4034# * Positive integer number
4035#
4036# Related options:
4037#
4038# * This value is used with the ``instance_updated_at_threshold``
4039# value in a periodic task run.
4040# (integer value)
4041# This option is deprecated for removal since 16.0.0.
4042# Its value may be silently ignored in the future.
4043# Reason: Cells v1 is being replaced with Cells v2.
4044#instance_update_num_instances = 1
4045
4046# DEPRECATED:
4047# Maximum hop count
4048#
4049# When processing a targeted message, if the local cell is not the
4050# target, a route is defined between neighbouring cells. And the
4051# message is processed across the whole routing path. This option
4052# defines the maximum hop counts until reaching the target.
4053#
4054# Possible values:
4055#
4056# * Positive integer value
4057# (integer value)
4058# This option is deprecated for removal since 16.0.0.
4059# Its value may be silently ignored in the future.
4060# Reason: Cells v1 is being replaced with Cells v2.
4061#max_hop_count = 10
4062
4063# DEPRECATED:
4064# Cells scheduler.
4065#
4066# The class of the driver used by the cells scheduler. This should be
4067# the full Python path to the class to be used. If nothing is
4068# specified
4069# in this option, the CellsScheduler is used.
4070# (string value)
4071# This option is deprecated for removal since 16.0.0.
4072# Its value may be silently ignored in the future.
4073# Reason: Cells v1 is being replaced with Cells v2.
4074#scheduler = nova.cells.scheduler.CellsScheduler
4075
4076# DEPRECATED:
4077# RPC driver queue base.
4078#
4079# When sending a message to another cell by JSON-ifying the message
4080# and making an RPC cast to 'process_message', a base queue is used.
4081# This option defines the base queue name to be used when
4082# communicating
4083# between cells. Various topics by message type will be appended to
4084# this.
4085#
4086# Possible values:
4087#
4088# * The base queue name to be used when communicating between cells.
4089# (string value)
4090# This option is deprecated for removal since 16.0.0.
4091# Its value may be silently ignored in the future.
4092# Reason: Cells v1 is being replaced with Cells v2.
4093#rpc_driver_queue_base = cells.intercell
4094
4095# DEPRECATED:
4096# Scheduler filter classes.
4097#
4098# Filter classes the cells scheduler should use. An entry of
4099# "nova.cells.filters.all_filters" maps to all cells filters
4100# included with nova. As of the Mitaka release the following
4101# filter classes are available:
4102#
4103# Different cell filter: A scheduler hint of 'different_cell'
4104# with a value of a full cell name may be specified to route
4105# a build away from a particular cell.
4106#
4107# Image properties filter: Image metadata named
4108# 'hypervisor_version_requires' with a version specification
4109# may be specified to ensure the build goes to a cell which
4110# has hypervisors of the required version. If either the version
4111# requirement on the image or the hypervisor capability of the
4112# cell is not present, this filter returns without filtering out
4113# the cells.
4114#
4115# Target cell filter: A scheduler hint of 'target_cell' with a
4116# value of a full cell name may be specified to route a build to
4117# a particular cell. No error handling is done as there's no way
4118# to know whether the full path is valid.
4119#
4120# As an admin user, you can also add a filter that directs builds
4121# to a particular cell.
4122#
4123# (list value)
4124# This option is deprecated for removal since 16.0.0.
4125# Its value may be silently ignored in the future.
4126# Reason: Cells v1 is being replaced with Cells v2.
4127#scheduler_filter_classes = nova.cells.filters.all_filters
4128
4129# DEPRECATED:
4130# Scheduler weight classes.
4131#
4132# Weigher classes the cells scheduler should use. An entry of
4133# "nova.cells.weights.all_weighers" maps to all cell weighers
4134# included with nova. As of the Mitaka release the following
4135# weight classes are available:
4136#
4137# mute_child: Downgrades the likelihood of child cells being
4138# chosen for scheduling requests, which haven't sent capacity
4139# or capability updates in a while. Options include
4140# mute_weight_multiplier (multiplier for mute children; value
4141# should be negative).
4142#
4143# ram_by_instance_type: Select cells with the most RAM capacity
4144# for the instance type being requested. Because higher weights
4145# win, Compute returns the number of available units for the
4146# instance type requested. The ram_weight_multiplier option defaults
4147# to 10.0 that adds to the weight by a factor of 10. Use a negative
4148# number to stack VMs on one host instead of spreading out new VMs
4149# to more hosts in the cell.
4150#
4151# weight_offset: Allows modifying the database to weight a particular
4152# cell. The highest weight will be the first cell to be scheduled for
4153# launching an instance. When the weight_offset of a cell is set to 0,
4154# it is unlikely to be picked but it could be picked if other cells
4155# have a lower weight, like if they're full. And when the
4156# weight_offset
4157# is set to a very high value (for example, '999999999999999'), it is
4158# likely to be picked if no other cell has a higher weight.
4159# (list value)
4160# This option is deprecated for removal since 16.0.0.
4161# Its value may be silently ignored in the future.
4162# Reason: Cells v1 is being replaced with Cells v2.
4163#scheduler_weight_classes = nova.cells.weights.all_weighers
4164
4165# DEPRECATED:
4166# Scheduler retries.
4167#
4168# How many retries when no cells are available. Specifies how many
4169# times the scheduler tries to launch a new instance when no cells
4170# are available.
4171#
4172# Possible values:
4173#
4174# * Positive integer value
4175#
4176# Related options:
4177#
4178# * This value is used with the ``scheduler_retry_delay`` value
4179# while retrying to find a suitable cell.
4180# (integer value)
4181# This option is deprecated for removal since 16.0.0.
4182# Its value may be silently ignored in the future.
4183# Reason: Cells v1 is being replaced with Cells v2.
4184#scheduler_retries = 10
4185
4186# DEPRECATED:
4187# Scheduler retry delay.
4188#
4189# Specifies the delay (in seconds) between scheduling retries when no
4190# cell can be found to place the new instance on. When the instance
4191# could not be scheduled to a cell after ``scheduler_retries`` in
4192# combination with ``scheduler_retry_delay``, then the scheduling
4193# of the instance failed.
4194#
4195# Possible values:
4196#
4197# * Time in seconds.
4198#
4199# Related options:
4200#
4201# * This value is used with the ``scheduler_retries`` value
4202# while retrying to find a suitable cell.
4203# (integer value)
4204# This option is deprecated for removal since 16.0.0.
4205# Its value may be silently ignored in the future.
4206# Reason: Cells v1 is being replaced with Cells v2.
4207#scheduler_retry_delay = 2
4208
4209# DEPRECATED:
4210# DB check interval.
4211#
4212# Cell state manager updates cell status for all cells from the DB
4213# only after this particular interval time is passed. Otherwise cached
4214# statuses are used. If this value is 0 or negative, all cell statuses are
4215# updated from the DB whenever a state is needed.
4216#
4217# Possible values:
4218#
4219# * Interval time, in seconds.
4220#
4221# (integer value)
4222# This option is deprecated for removal since 16.0.0.
4223# Its value may be silently ignored in the future.
4224# Reason: Cells v1 is being replaced with Cells v2.
4225#db_check_interval = 60
4226
4227# DEPRECATED:
4228# Optional cells configuration.
4229#
4230# Configuration file from which to read cells configuration. If given,
4231# overrides reading cells from the database.
4232#
4233# Cells store all inter-cell communication data, including user names
4234# and passwords, in the database. Because the cells data is not
4235# updated
4236# very frequently, use this option to specify a JSON file to store
4237# cells data. With this configuration, the database is no longer
4238# consulted when reloading the cells data. The file must have columns
4239# present in the Cell model (excluding common database fields and the
4240# id column). You must specify the queue connection information
4241# through
4242# a transport_url field, instead of username, password, and so on.
4243#
4244# The transport_url has the following form:
4245# rabbit://USERNAME:PASSWORD@HOSTNAME:PORT/VIRTUAL_HOST
4246#
4247# Possible values:
4248#
4249# The scheme can be either qpid or rabbit, the following sample shows
4250# this optional configuration:
4251#
4252# {
4253# "parent": {
4254# "name": "parent",
4255# "api_url": "http://api.example.com:8774",
4256# "transport_url": "rabbit://rabbit.example.com",
4257# "weight_offset": 0.0,
4258# "weight_scale": 1.0,
4259# "is_parent": true
4260# },
4261# "cell1": {
4262# "name": "cell1",
4263# "api_url": "http://api.example.com:8774",
4264# "transport_url": "rabbit://rabbit1.example.com",
4265# "weight_offset": 0.0,
4266# "weight_scale": 1.0,
4267# "is_parent": false
4268# },
4269# "cell2": {
4270# "name": "cell2",
4271# "api_url": "http://api.example.com:8774",
4272# "transport_url": "rabbit://rabbit2.example.com",
4273# "weight_offset": 0.0,
4274# "weight_scale": 1.0,
4275# "is_parent": false
4276# }
4277# }
4278#
4279# (string value)
4280# This option is deprecated for removal since 16.0.0.
4281# Its value may be silently ignored in the future.
4282# Reason: Cells v1 is being replaced with Cells v2.
4283#cells_config = <None>
4284
4285
4286[cinder]
4287
4288#
4289# From nova.conf
4290#
4291
4292#
4293# Info to match when looking for cinder in the service catalog.
4294#
4295# Possible values:
4296#
4297# * Format is separated values of the form:
4298# <service_type>:<service_name>:<endpoint_type>
4299#
4300# Note: Nova does not support the Cinder v2 API since the Nova 17.0.0
4301# Queens
4302# release.
4303#
4304# Related options:
4305#
4306# * endpoint_template - Setting this option will override catalog_info
4307# (string value)
4308#catalog_info = volumev3:cinderv3:publicURL
4309catalog_info = volumev3:cinderv3:internalURL
4310
4311#
4312# If this option is set then it will override service catalog lookup
4313# with
4314# this template for cinder endpoint
4315#
4316# Possible values:
4317#
4318# * URL for cinder endpoint API
4319# e.g. http://localhost:8776/v3/%(project_id)s
4320#
4321# Note: Nova does not support the Cinder v2 API since the Nova 17.0.0
4322# Queens
4323# release.
4324#
4325# Related options:
4326#
4327# * catalog_info - If endpoint_template is not set, catalog_info will
4328# be used.
4329# (string value)
4330#endpoint_template = <None>
4331
4332#
4333# Region name of this node. This is used when picking the URL in the
4334# service
4335# catalog.
4336#
4337# Possible values:
4338#
4339# * Any string representing region name
4340# (string value)
4341#os_region_name = <None>
4342os_region_name = {{ compute.identity.region }}
4343
4344#
4345# Number of times cinderclient should retry on any failed http call.
4346# 0 means connection is attempted only once. Setting it to any
4347# positive integer
4348# means that on failure connection is retried that many times e.g.
4349# setting it
4350# to 3 means total attempts to connect will be 4.
4351#
4352# Possible values:
4353#
4354# * Any integer value. 0 means connection is attempted only once
4355# (integer value)
4356# Minimum value: 0
4357#http_retries = 3
4358
4359#
4360# Allow attach between instance and volume in different availability
4361# zones.
4362#
4363# If False, volumes attached to an instance must be in the same
4364# availability
4365# zone in Cinder as the instance availability zone in Nova.
4366# This also means care should be taken when booting an instance from a
4367# volume
4368# where source is not "volume" because Nova will attempt to create a
4369# volume using
4370# the same availability zone as what is assigned to the instance.
4371# If that AZ is not in Cinder (or
4372# allow_availability_zone_fallback=False in
4373# cinder.conf), the volume create request will fail and the instance
4374# will fail
4375# the build request.
4376# By default there is no availability zone restriction on volume
4377# attach.
4378# (boolean value)
4379#cross_az_attach = true
4380{%- if compute.cross_az_attach is defined %}
4381cross_az_attach={{ compute.cross_az_attach }}
4382{%- endif %}
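# A minimal pillar sketch (illustrative only) of the override above:
#   nova:
#     compute:
#       cross_az_attach: false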
4383
4384{%- set _data = compute.get('cinder', compute.get('identity', {})) %}
4385{%- set auth_type = _data.get('auth_type', 'password') %}
4386{%- if 'cacert_file' not in _data.keys() %}{% do _data.update({'cacert_file': compute.cacert_file}) %}{% endif %}
4387{%- include "oslo_templates/files/queens/keystoneauth/_type_" + auth_type + ".conf" %}
4388
4389
4390[compute]
4391
4392#
4393# From nova.conf
4394#
4395
4396#
4397# Number of consecutive failed builds that result in disabling a
4398# compute service.
4399#
4400# This option will cause nova-compute to set itself to a disabled
4401# state
4402# if a certain number of consecutive build failures occur. This will
4403# prevent the scheduler from continuing to send builds to a compute
4404# node that is
4405# consistently failing. Note that all failures qualify and count
4406# towards this
4407# score, including reschedules that may have been due to racy
4408# scheduler behavior.
4409# Since the failures must be consecutive, it is unlikely that
4410# occasional expected
4411# reschedules will actually disable a compute node.
4412#
4413# Possible values:
4414#
4415# * Any positive integer representing a build failure count.
4416# * Zero to never auto-disable.
4417# (integer value)
4418#consecutive_build_service_disable_threshold = 10
4419{%- if compute.get('compute', {}).consecutive_build_service_disable_threshold is defined %}
4420consecutive_build_service_disable_threshold = {{ compute.compute.consecutive_build_service_disable_threshold }}
4421{%- endif %}
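# A minimal pillar sketch (illustrative only) of the override above; 0 means
# never auto-disable the compute service:
#   nova:
#     compute:
#       compute:
#         consecutive_build_service_disable_threshold: 0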
4422
4423#
4424# Interval for updating nova-compute-side cache of the compute node
4425# resource
4426# provider's aggregates and traits info.
4427#
4428# This option specifies the number of seconds between attempts to
4429# update a
4430# provider's aggregates and traits information in the local cache of
4431# the compute
4432# node.
4433#
4434# Possible values:
4435#
4436# * Any positive integer in seconds.
4437# (integer value)
4438# Minimum value: 1
4439#resource_provider_association_refresh = 300
4440
4441#
4442# Determine if the source compute host should wait for a ``network-vif-plugged``
4443# event from the (neutron) networking service before starting the actual
4444# transfer
4445# of the guest to the destination compute host.
4446#
4447# If you set this option the same on all of your compute hosts, which you should
4448# do if you use the same networking backend universally, you do not have to
4449# worry about this.
4450#
4451# Before starting the transfer of the guest, some setup occurs on the
4452# destination
4453# compute host, including plugging virtual interfaces. Depending on the
4454# networking backend **on the destination host**, a ``network-vif-plugged``
4455# event may be triggered and then received on the source compute host and the
4456# source compute can wait for that event to ensure networking is set up on the
4457# destination host before starting the guest transfer in the hypervisor.
4458#
4459# By default, this is False for two reasons:
4460#
4461# 1. Backward compatibility: deployments should test this out and ensure it
4462# works
4463# for them before enabling it.
4464#
4465# 2. The compute service cannot reliably determine which types of virtual
4466# interfaces (``port.binding:vif_type``) will send ``network-vif-plugged``
4467# events without an accompanying port ``binding:host_id`` change.
4468# Open vSwitch and linuxbridge should be OK, but OpenDaylight is at least
4469# one known backend that will not currently work in this case, see bug
4470# https://launchpad.net/bugs/1755890 for more details.
4471#
4472# Possible values:
4473#
4474# * True: wait for ``network-vif-plugged`` events before starting guest transfer
4475# * False: do not wait for ``network-vif-plugged`` events before starting guest
4476# transfer (this is how things have always worked before this option
4477# was introduced)
4478#
4479# Related options:
4480#
4481# * [DEFAULT]/vif_plugging_is_fatal: if ``live_migration_wait_for_vif_plug`` is
4482# True and ``vif_plugging_timeout`` is greater than 0, and a timeout is
4483# reached, the live migration process will fail with an error but the guest
4484# transfer will not have started to the destination host
4485# * [DEFAULT]/vif_plugging_timeout: if ``live_migration_wait_for_vif_plug`` is
4486# True, this controls the amount of time to wait before timing out and either
4487# failing if ``vif_plugging_is_fatal`` is True, or simply continuing with the
4488# live migration
4489# (boolean value)
4490#live_migration_wait_for_vif_plug = false
4491{%- if pillar.get('neutron', {}).get('compute', {}).get('backend', {}).get('mechanism', {}).get('ovs', {}).get('driver', '') == 'openvswitch' %}
4492live_migration_wait_for_vif_plug = true
4493{%- endif %}
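# The override above is rendered only when the neutron compute pillar declares
# the Open vSwitch mechanism driver, e.g. (a minimal illustrative sketch):
#   neutron:
#     compute:
#       backend:
#         mechanism:
#           ovs:
#             driver: openvswitch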
4494
4495
4496[conductor]
4497#
4498# Options under this group are used to define Conductor's
4499# communication,
4500# which manager should act as a proxy between computes and
4501# database,
4502# and finally, how many worker processes will be used.
4503
4504#
4505# From nova.conf
4506#
4507
4508# DEPRECATED:
4509# Topic exchange name on which conductor nodes listen.
4510# (string value)
4511# This option is deprecated for removal since 15.0.0.
4512# Its value may be silently ignored in the future.
4513# Reason:
4514# There is no need to let users choose the RPC topic for all services
4515# - there
4516# is little gain from this. Furthermore, it makes it really easy to
4517# break Nova
4518# by using this option.
4519#topic = conductor
4520
4521#
4522# Number of workers for OpenStack Conductor service. The default will
4523# be the
4524# number of CPUs available.
4525# (integer value)
4526#workers = <None>
4527
4528
4529[console]
4530#
4531# Options under this group allow you to tune the configuration of the
4532# console proxy
4533# service.
4534#
4535# Note: the configuration of every compute node includes a ``console_host``
4536# option,
4537# which allows you to select the console proxy service to connect to.
4538
4539#
4540# From nova.conf
4541#
4542
4543#
4544# Adds list of allowed origins to the console websocket proxy to allow
4545# connections from other origin hostnames.
4546# Websocket proxy matches the host header with the origin header to
4547# prevent cross-site requests. This list specifies if any there are
4548# values other than host are allowed in the origin header.
4549#
4550# Possible values:
4551#
4552# * A list where each element is an allowed origin hostnames, else an
4553# empty list
4554# (list value)
4555# Deprecated group/name - [DEFAULT]/console_allowed_origins
4556#allowed_origins =
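#
# Example (illustrative only; the hostname is a placeholder):
# allowed_origins = novnc.example.com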
4557
4558
4559[consoleauth]
4560
4561#
4562# From nova.conf
4563#
4564
4565#
4566# The lifetime of a console auth token (in seconds).
4567#
4568# A console auth token is used in authorizing console access for a
4569# user.
4570# Once the auth token time to live count has elapsed, the token is
4571# considered expired. Expired tokens are then deleted.
4572# (integer value)
4573# Minimum value: 0
4574# Deprecated group/name - [DEFAULT]/console_token_ttl
4575#token_ttl = 600
4576{% if compute.consoleauth_token_ttl is defined %}
4577{%- set token_ttl = compute.consoleauth_token_ttl %}
4578token_ttl = {{ token_ttl }}
4579{%- elif compute.get('consoleauth', {}).token_ttl is defined %}
4580token_ttl = {{ compute.consoleauth.token_ttl }}
4581{% endif %}
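# A minimal pillar sketch (illustrative only) of the override above:
#   nova:
#     compute:
#       consoleauth:
#         token_ttl: 3600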
4582
4583
4584[crypto]
4585
4586#
4587# From nova.conf
4588#
4589
4590#
4591# Filename of root CA (Certificate Authority). This is a container
4592# format
4593# and includes root certificates.
4594#
4595# Possible values:
4596#
4597# * Any file name containing root CA, cacert.pem is default
4598#
4599# Related options:
4600#
4601# * ca_path
4602# (string value)
4603#ca_file = cacert.pem
4604
4605#
4606# Filename of a private key.
4607#
4608# Related options:
4609#
4610# * keys_path
4611# (string value)
4612#key_file = private/cakey.pem
4613
4614#
4615# Filename of root Certificate Revocation List (CRL). This is a list
4616# of
4617# certificates that have been revoked, and therefore, entities
4618# presenting
4619# those (revoked) certificates should no longer be trusted.
4620#
4621# Related options:
4622#
4623# * ca_path
4624# (string value)
4625#crl_file = crl.pem
4626
4627#
4628# Directory path where keys are located.
4629#
4630# Related options:
4631#
4632# * key_file
4633# (string value)
4634#keys_path = $state_path/keys
4635
4636#
4637# Directory path where root CA is located.
4638#
4639# Related options:
4640#
4641# * ca_file
4642# (string value)
4643#ca_path = $state_path/CA
4644
4645# Option to enable/disable use of CA for each project. (boolean value)
4646#use_project_ca = false
4647
4648#
4649# Subject for certificate for users, %s for
4650# project, user, timestamp
4651# (string value)
4652#user_cert_subject = /C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s
4653
4654#
4655# Subject for certificate for projects, %s for
4656# project, timestamp
4657# (string value)
4658#project_cert_subject = /C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s
4659
4660
4661[devices]
4662
4663#
4664# From nova.conf
4665#
4666
4667#
4668# A list of the vGPU types enabled in the compute node.
4669#
4670# Some pGPUs (e.g. NVIDIA GRID K1) support different vGPU types. User
4671# can use
4672# this option to specify a list of enabled vGPU types that may be
4673# assigned to a
4674# guest instance. But please note that Nova only supports a single
4675# type in the
4676# Queens release. If more than one vGPU type is specified (as a comma-
4677# separated
4678# list), only the first one will be used. An example is as the
4679# following:
4680# [devices]
4681# enabled_vgpu_types = GRID K100,Intel GVT-g,MxGPU.2,nvidia-11
4682# (list value)
4683#enabled_vgpu_types =
4684
4685
4686[ephemeral_storage_encryption]
4687
4688#
4689# From nova.conf
4690#
4691
4692#
4693# Enables/disables LVM ephemeral storage encryption.
4694# (boolean value)
4695#enabled = false
4696
4697#
4698# Cipher-mode string to be used.
4699#
4700# The cipher and mode to be used to encrypt ephemeral storage. The set
4701# of
4702# cipher-mode combinations available depends on kernel support.
4703# According
4704# to the dm-crypt documentation, the cipher is expected to be in the
4705# format:
4706# "<cipher>-<chainmode>-<ivmode>".
4707#
4708# Possible values:
4709#
4710# * Any crypto option listed in ``/proc/crypto``.
4711# (string value)
4712#cipher = aes-xts-plain64
4713
4714#
4715# Encryption key length in bits.
4716#
4717# The bit length of the encryption key to be used to encrypt ephemeral
4718# storage.
4719# In XTS mode only half of the bits are used for encryption key.
4720# (integer value)
4721# Minimum value: 1
4722#key_size = 512
4723
4724
4725[filter_scheduler]
4726
4727#
4728# From nova.conf
4729#
4730
4731#
4732# Size of subset of best hosts selected by scheduler.
4733#
4734# New instances will be scheduled on a host chosen randomly from a
4735# subset of the
4736# N best hosts, where N is the value set by this option.
4737#
4738# Setting this to a value greater than 1 will reduce the chance that
4739# multiple
4740# scheduler processes handling similar requests will select the same
4741# host,
4742# creating a potential race condition. By selecting a host randomly
4743# from the N
4744# hosts that best fit the request, the chance of a conflict is
4745# reduced. However,
4746# the higher you set this value, the less optimal the chosen host may
4747# be for a
4748# given request.
4749#
4750# This option is only used by the FilterScheduler and its subclasses;
4751# if you use
4752# a different scheduler, this option has no effect.
4753#
4754# Possible values:
4755#
4756# * An integer, where the integer corresponds to the size of a host
4757# subset. Any
4758# integer is valid, although any value less than 1 will be treated
4759# as 1
4760# (integer value)
4761# Minimum value: 1
4762# Deprecated group/name - [DEFAULT]/scheduler_host_subset_size
4763#host_subset_size = 1
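#
# Example (illustrative only): with several scheduler workers, choosing
# randomly from the 3 best hosts reduces the chance that two workers pick
# the same host.
# host_subset_size = 3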
4764
4765#
4766# The number of instances that can be actively performing IO on a
4767# host.
4768#
4769# Instances performing IO include those in the following states:
4770# build, resize,
4771# snapshot, migrate, rescue, unshelve.
4772#
4773# This option is only used by the FilterScheduler and its subclasses;
4774# if you use
4775# a different scheduler, this option has no effect. Also note that
4776# this setting
4777# only affects scheduling if the 'io_ops_filter' filter is enabled.
4778#
4779# Possible values:
4780#
4781# * An integer, where the integer corresponds to the max number of
4782# instances
4783# that can be actively performing IO on any given host.
4784# (integer value)
4785#max_io_ops_per_host = 8
4786
4787#
4788# Maximum number of instances that can be active on a host.
4789#
4790# If you need to limit the number of instances on any given host, set
4791# this option
4792# to the maximum number of instances you want to allow. The
4793# num_instances_filter
4794# will reject any host that has at least as many instances as this
4795# option's
4796# value.
4797#
4798# This option is only used by the FilterScheduler and its subclasses;
4799# if you use
4800# a different scheduler, this option has no effect. Also note that
4801# this setting
4802# only affects scheduling if the 'num_instances_filter' filter is
4803# enabled.
4804#
4805# Possible values:
4806#
4807# * An integer, where the integer corresponds to the max instances
4808# that can be
4809# scheduled on a host.
4810# (integer value)
4811# Minimum value: 1
4812#max_instances_per_host = 50
4813
4814#
4815# Enable querying of individual hosts for instance information.
4816#
4817# The scheduler may need information about the instances on a host in
4818# order to
4819# evaluate its filters and weighers. The most common need for this
4820# information is
4821# for the (anti-)affinity filters, which need to choose a host based
4822# on the
4823# instances already running on a host.
4824#
4825# If the configured filters and weighers do not need this information,
4826# disabling
4827# this option will improve performance. It may also be disabled when
4828# the tracking
4829# overhead proves too heavy, although this will cause classes
4830# requiring host
4831# usage data to query the database on each request instead.
4832#
4833# This option is only used by the FilterScheduler and its subclasses;
4834# if you use
4835# a different scheduler, this option has no effect.
4836#
4837# NOTE: In a multi-cell (v2) setup where the cell MQ is separated from
4838# the
4839# top-level, computes cannot directly communicate with the scheduler.
4840# Thus,
4841# this option cannot be enabled in that scenario. See also the
4842# [workarounds]/disable_group_policy_check_upcall option.
4843# (boolean value)
4844# Deprecated group/name - [DEFAULT]/scheduler_tracks_instance_changes
4845#track_instance_changes = true
4846
4847#
4848# Filters that the scheduler can use.
4849#
4850# An unordered list of the filter classes the nova scheduler may
4851# apply. Only the
4852# filters specified in the 'enabled_filters' option will be used, but
4853# any filter appearing in that option must also be included in this
4854# list.
4855#
4856# By default, this is set to all filters that are included with nova.
4857#
4858# This option is only used by the FilterScheduler and its subclasses;
4859# if you use
4860# a different scheduler, this option has no effect.
4861#
4862# Possible values:
4863#
4864# * A list of zero or more strings, where each string corresponds to
4865# the name of
4866# a filter that may be used for selecting a host
4867#
4868# Related options:
4869#
4870# * enabled_filters
4871# (multi valued)
4872# Deprecated group/name - [DEFAULT]/scheduler_available_filters
4873#available_filters = nova.scheduler.filters.all_filters
4874
4875#
4876# Filters that the scheduler will use.
4877#
4878# An ordered list of filter class names that will be used for
4879# filtering
4880# hosts. These filters will be applied in the order they are listed so
4881# place your most restrictive filters first to make the filtering
4882# process more
4883# efficient.
4884#
4885# This option is only used by the FilterScheduler and its subclasses;
4886# if you use
4887# a different scheduler, this option has no effect.
4888#
4889# Possible values:
4890#
4891# * A list of zero or more strings, where each string corresponds to
4892# the name of
4893# a filter to be used for selecting a host
4894#
4895# Related options:
4896#
4897# * All of the filters in this option *must* be present in the
4898# 'scheduler_available_filters' option, or a
4899# SchedulerHostFilterNotFound
4900# exception will be raised.
4901# (list value)
4902# Deprecated group/name - [DEFAULT]/scheduler_default_filters
4903#enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
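#
# Example (illustrative only): append an extra filter such as
# NUMATopologyFilter to the defaults; every name listed here must also be
# present in 'available_filters'.
# enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,NUMATopologyFilter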
4904
4905# DEPRECATED:
4906# Filters used for filtering baremetal hosts.
4907#
4908# Filters are applied in order, so place your most restrictive filters
4909# first to
4910# make the filtering process more efficient.
4911#
4912# This option is only used by the FilterScheduler and its subclasses;
4913# if you use
4914# a different scheduler, this option has no effect.
4915#
4916# Possible values:
4917#
4918# * A list of zero or more strings, where each string corresponds to
4919# the name of
4920# a filter to be used for selecting a baremetal host
4921#
4922# Related options:
4923#
4924# * If the 'scheduler_use_baremetal_filters' option is False, this
4925# option has
4926# no effect.
4927# (list value)
4928# Deprecated group/name - [DEFAULT]/baremetal_scheduler_default_filters
4929# This option is deprecated for removal.
4930# Its value may be silently ignored in the future.
4931# Reason:
4932# These filters were used to overcome some of the baremetal scheduling
4933# limitations in Nova prior to the use of the Placement API. Now
4934# scheduling will
4935# use the custom resource class defined for each baremetal node to
4936# make its
4937# selection.
4938#baremetal_enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ExactRamFilter,ExactDiskFilter,ExactCoreFilter
4939
4940# DEPRECATED:
4941# Enable baremetal filters.
4942#
4943# Set this to True to tell the nova scheduler that it should use the
4944# filters
4945# specified in the 'baremetal_enabled_filters' option. If you are not
4946# scheduling baremetal nodes, leave this at the default setting of
4947# False.
4948#
4949# This option is only used by the FilterScheduler and its subclasses;
4950# if you use
4951# a different scheduler, this option has no effect.
4952#
4953# Related options:
4954#
4955# * If this option is set to True, then the filters specified in the
4956# 'baremetal_enabled_filters' are used instead of the filters
4957# specified in 'enabled_filters'.
4958# (boolean value)
4959# Deprecated group/name - [DEFAULT]/scheduler_use_baremetal_filters
4960# This option is deprecated for removal.
4961# Its value may be silently ignored in the future.
4962# Reason:
4963# These filters were used to overcome some of the baremetal scheduling
4964# limitations in Nova prior to the use of the Placement API. Now
4965# scheduling will
4966# use the custom resource class defined for each baremetal node to
4967# make its
4968# selection.
4969#use_baremetal_filters = false
4970
4971#
4972# Weighers that the scheduler will use.
4973#
4974# Only hosts which pass the filters are weighed. The weight for any
4975# host starts
4976# at 0, and the weighers order these hosts by adding to or subtracting
4977# from the
4978# weight assigned by the previous weigher. Weights may become
4979# negative. An
4980# instance will be scheduled to one of the N most-weighted hosts,
4981# where N is
4982# 'scheduler_host_subset_size'.
4983#
4984# By default, this is set to all weighers that are included with Nova.
4985#
4986# This option is only used by the FilterScheduler and its subclasses;
4987# if you use
4988# a different scheduler, this option has no effect.
4989#
4990# Possible values:
4991#
4992# * A list of zero or more strings, where each string corresponds to
4993# the name of
4994# a weigher that will be used for selecting a host
4995# (list value)
4996# Deprecated group/name - [DEFAULT]/scheduler_weight_classes
4997#weight_classes = nova.scheduler.weights.all_weighers
4998
4999#
5000# Ram weight multiplier ratio.
5001#
5002# This option determines how hosts with more or less available RAM are
5003# weighed. A
5004# positive value will result in the scheduler preferring hosts with
5005# more
5006# available RAM, and a negative number will result in the scheduler
5007# preferring
5008# hosts with less available RAM. Another way to look at it is that
5009# positive
5010# values for this option will tend to spread instances across many
5011# hosts, while
5012# negative values will tend to fill up (stack) hosts as much as
5013# possible before
5014# scheduling to a less-used host. The absolute value, whether positive
5015# or
5016# negative, controls how strong the RAM weigher is relative to other
5017# weighers.
5018#
5019# This option is only used by the FilterScheduler and its subclasses;
5020# if you use
5021# a different scheduler, this option has no effect. Also note that
5022# this setting
5023# only affects scheduling if the 'ram' weigher is enabled.
5024#
5025# Possible values:
5026#
5027# * An integer or float value, where the value corresponds to the
5028# multiplier
5029# ratio for this weigher.
5030# (floating point value)
5031#ram_weight_multiplier = 1.0
5032
5033#
5034# Disk weight multiplier ratio.
5035#
5036# Multiplier used for weighing free disk space. Negative numbers mean
5037# to
5038# stack vs spread.
5039#
5040# This option is only used by the FilterScheduler and its subclasses;
5041# if you use
5042# a different scheduler, this option has no effect. Also note that
5043# this setting
5044# only affects scheduling if the 'disk' weigher is enabled.
5045#
5046# Possible values:
5047#
5048# * An integer or float value, where the value corresponds to the
5049# multiplier
5050# ratio for this weigher.
5051# (floating point value)
5052#disk_weight_multiplier = 1.0
5053
5054#
5055# IO operations weight multiplier ratio.
5056#
5057# This option determines how hosts with differing workloads are
5058# weighed. Negative
5059# values, such as the default, will result in the scheduler preferring
5060# hosts with
5061# lighter workloads whereas positive values will prefer hosts with
5062# heavier
5063# workloads. Another way to look at it is that positive values for
5064# this option
5065# will tend to schedule instances onto hosts that are already busy,
5066# while
5067# negative values will tend to distribute the workload across more
5068# hosts. The
5069# absolute value, whether positive or negative, controls how strong
5070# the io_ops
5071# weigher is relative to other weighers.
5072#
5073# This option is only used by the FilterScheduler and its subclasses;
5074# if you use
5075# a different scheduler, this option has no effect. Also note that
5076# this setting
5077# only affects scheduling if the 'io_ops' weigher is enabled.
5078#
5079# Possible values:
5080#
5081# * An integer or float value, where the value corresponds to the
5082# multipler
5083# ratio for this weigher.
5084# (floating point value)
5085#io_ops_weight_multiplier = -1.0
5086
5087#
5088# PCI device affinity weight multiplier.
5089#
5090# The PCI device affinity weighter computes a weighting based on the
5091# number of
5092# PCI devices on the host and the number of PCI devices requested by
5093# the
5094# instance. The ``NUMATopologyFilter`` filter must be enabled for this
5095# to have
5096# any significance. For more information, refer to the filter
5097# documentation:
5098#
5099# https://docs.openstack.org/nova/latest/user/filter-
5100# scheduler.html
5101#
5102# Possible values:
5103#
5104# * A positive integer or float value, where the value corresponds to
5105# the
5106# multiplier ratio for this weigher.
5107# (floating point value)
5108# Minimum value: 0
5109#pci_weight_multiplier = 1.0
5110
5111#
5112# Multiplier used for weighing hosts for group soft-affinity.
5113#
5114# Possible values:
5115#
5116# * An integer or float value, where the value corresponds to weight
5117# multiplier
5118# for hosts with group soft affinity. Only positive values are
5119# meaningful, as
5120# negative values would make this behave as a soft anti-affinity
5121# weigher.
5122# (floating point value)
5123#soft_affinity_weight_multiplier = 1.0
5124
5125#
5126# Multiplier used for weighing hosts for group soft-anti-affinity.
5127#
5128# Possible values:
5129#
5130# * An integer or float value, where the value corresponds to weight
5131# multiplier
5132# for hosts with group soft anti-affinity. Only positive values are
5133# meaningful, as negative values would make this behave as a soft
5134# affinity
5135# weigher.
5136# (floating point value)
5137#soft_anti_affinity_weight_multiplier = 1.0
5138
5139#
5140# Enable spreading the instances between hosts with the same best
5141# weight.
5142#
5143# Enabling it is beneficial for cases when host_subset_size is 1
5144# (default), but there is a large number of hosts with the same maximal
5145# weight.
5146# This scenario is common in Ironic deployments where there are
5147# typically many
5148# baremetal nodes with identical weights returned to the scheduler.
5149# In such case enabling this option will reduce contention and chances
5150# for
5151# rescheduling events.
5152# At the same time it will make the instance packing (even in
5153# unweighed case)
5154# less dense.
5155# (boolean value)
5156#shuffle_best_same_weighed_hosts = false
5157
5158#
5159# The default architecture to be used when using the image properties
5160# filter.
5161#
5162# When using the ImagePropertiesFilter, it is possible that you want
5163# to define
5164# a default architecture to make the user experience easier and avoid
5165# having
5166# something like x86_64 images landing on aarch64 compute nodes
5167# because the
5168# user did not specify the 'hw_architecture' property in Glance.
5169#
5170# Possible values:
5171#
5172# * CPU Architectures such as x86_64, aarch64, s390x.
5173# (string value)
5174# Possible values:
5175# alpha - <No description provided>
5176# armv6 - <No description provided>
5177# armv7l - <No description provided>
5178# armv7b - <No description provided>
5179# aarch64 - <No description provided>
5180# cris - <No description provided>
5181# i686 - <No description provided>
5182# ia64 - <No description provided>
5183# lm32 - <No description provided>
5184# m68k - <No description provided>
5185# microblaze - <No description provided>
5186# microblazeel - <No description provided>
5187# mips - <No description provided>
5188# mipsel - <No description provided>
5189# mips64 - <No description provided>
5190# mips64el - <No description provided>
5191# openrisc - <No description provided>
5192# parisc - <No description provided>
5193# parisc64 - <No description provided>
5194# ppc - <No description provided>
5195# ppcle - <No description provided>
5196# ppc64 - <No description provided>
5197# ppc64le - <No description provided>
5198# ppcemb - <No description provided>
5199# s390 - <No description provided>
5200# s390x - <No description provided>
5201# sh4 - <No description provided>
5202# sh4eb - <No description provided>
5203# sparc - <No description provided>
5204# sparc64 - <No description provided>
5205# unicore32 - <No description provided>
5206# x86_64 - <No description provided>
5207# xtensa - <No description provided>
5208# xtensaeb - <No description provided>
5209#image_properties_default_architecture = <None>
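#
# Illustrative example (not from the upstream sample): on a purely x86_64
# cloud, images that lack an 'hw_architecture' property could be treated
# as x86_64 by setting, for instance:
#
#   image_properties_default_architecture = x86_64
#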
5210
5211#
5212# List of UUIDs for images that can only be run on certain hosts.
5213#
5214# If there is a need to restrict some images to only run on certain
5215# designated
5216# hosts, list those image UUIDs here.
5217#
5218# This option is only used by the FilterScheduler and its subclasses;
5219# if you use
5220# a different scheduler, this option has no effect. Also note that
5221# this setting
5222# only affects scheduling if the 'IsolatedHostsFilter' filter is
5223# enabled.
5224#
5225# Possible values:
5226#
5227# * A list of UUID strings, where each string corresponds to the UUID
5228# of an
5229# image
5230#
5231# Related options:
5232#
5233# * scheduler/isolated_hosts
5234# * scheduler/restrict_isolated_hosts_to_isolated_images
5235# (list value)
5236#isolated_images =
5237
5238#
5239# List of hosts that can only run certain images.
5240#
5241# If there is a need to restrict some images to only run on certain
5242# designated
5243# hosts, list those host names here.
5244#
5245# This option is only used by the FilterScheduler and its subclasses;
5246# if you use
5247# a different scheduler, this option has no effect. Also note that
5248# this setting
5249# only affects scheduling if the 'IsolatedHostsFilter' filter is
5250# enabled.
5251#
5252# Possible values:
5253#
5254# * A list of strings, where each string corresponds to the name of a
5255# host
5256#
5257# Related options:
5258#
5259# * scheduler/isolated_images
5260# * scheduler/restrict_isolated_hosts_to_isolated_images
5261# (list value)
5262#isolated_hosts =
5263
5264#
5265# Prevent non-isolated images from being built on isolated hosts.
5266#
5267# This option is only used by the FilterScheduler and its subclasses;
5268# if you use
5269# a different scheduler, this option has no effect. Also note that
5270# this setting
5271# only affects scheduling if the 'IsolatedHostsFilter' filter is
5272# enabled. Even
5273# then, this option doesn't affect the behavior of requests for
5274# isolated images,
5275# which will *always* be restricted to isolated hosts.
5276#
5277# Related options:
5278#
5279# * scheduler/isolated_images
5280# * scheduler/isolated_hosts
5281# (boolean value)
5282#restrict_isolated_hosts_to_isolated_images = true
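#
# Illustrative example (hypothetical UUID and host names): to pin a single
# licensed image to two dedicated hosts with the IsolatedHostsFilter
# enabled, one might set:
#
#   isolated_images = 11111111-2222-3333-4444-555555555555
#   isolated_hosts = compute-lic-01,compute-lic-02
#   restrict_isolated_hosts_to_isolated_images = true
#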
5283
5284#
5285# Image property namespace for use in the host aggregate.
5286#
5287# Images and hosts can be configured so that certain images can only
5288# be scheduled
5289# to hosts in a particular aggregate. This is done with metadata
5290# values set on
5291# the host aggregate that are identified by beginning with the value
5292# of this
5293# option. If the host is part of an aggregate with such a metadata
5294# key, the image
5295# in the request spec must have the value of that metadata in its
5296# properties in
5297# order for the scheduler to consider the host as acceptable.
5298#
5299# This option is only used by the FilterScheduler and its subclasses;
5300# if you use
5301# a different scheduler, this option has no effect. Also note that
5302# this setting
5303# only affects scheduling if the
5304# 'aggregate_image_properties_isolation' filter is
5305# enabled.
5306#
5307# Possible values:
5308#
5309# * A string, where the string corresponds to an image property
5310# namespace
5311#
5312# Related options:
5313#
5314# * aggregate_image_properties_isolation_separator
5315# (string value)
5316#aggregate_image_properties_isolation_namespace = <None>
5317
5318#
5319# Separator character(s) for image property namespace and name.
5320#
5321# When using the aggregate_image_properties_isolation filter, the
5322# relevant
5323# metadata keys are prefixed with the namespace defined in the
5324# aggregate_image_properties_isolation_namespace configuration option
5325# plus a
5326# separator. This option defines the separator to be used.
5327#
5328# This option is only used by the FilterScheduler and its subclasses;
5329# if you use
5330# a different scheduler, this option has no effect. Also note that
5331# this setting
5332# only affects scheduling if the
5333# 'aggregate_image_properties_isolation' filter
5334# is enabled.
5335#
5336# Possible values:
5337#
5338# * A string, where the string corresponds to an image property
5339# namespace
5340# separator character
5341#
5342# Related options:
5343#
5344# * aggregate_image_properties_isolation_namespace
5345# (string value)
5346#aggregate_image_properties_isolation_separator = .
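#
# Illustrative example (hypothetical namespace): to have the
# 'aggregate_image_properties_isolation' filter only consider aggregate
# metadata keys that start with 'filter_tenant.', one might set:
#
#   aggregate_image_properties_isolation_namespace = filter_tenant
#   aggregate_image_properties_isolation_separator = .
#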
5347
5348
5349[glance]
5350# Configuration options for the Image service
5351
5352#
5353# From nova.conf
5354#
5355
5356#
5357# List of glance api servers endpoints available to nova.
5358#
5359# https is used for ssl-based glance api servers.
5360#
5361# NOTE: The preferred mechanism for endpoint discovery is via
5362# keystoneauth1
5363# loading options. Only use api_servers if you need multiple endpoints
5364# and are
5365# unable to use a load balancer for some reason.
5366#
5367# Possible values:
5368#
5369# * A list of any fully qualified url of the form
5370# "scheme://hostname:port[/path]"
5371# (i.e. "http://10.0.1.0:9292" or "https://my.glance.server/image").
5372# (list value)
5373#api_servers = <None>
5374{%- if compute.image is defined %}
5375api_servers = {{ compute.image.get('protocol', 'http') }}://{{ compute.image.host }}:{{ compute.image.get('port', 9292) }}
5376{% endif %}
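#
# For reference, with an illustrative pillar value such as
# compute:image:host set to 10.10.10.10 (protocol and port left at their
# defaults), the template block above would render approximately:
#
#   api_servers = http://10.10.10.10:9292
#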
5377
5378#
5379# Enable glance operation retries.
5380#
5381# Specifies the number of retries when uploading / downloading
5382# an image to / from glance. 0 means no retries.
5383# (integer value)
5384# Minimum value: 0
5385#num_retries = 0
5386
5387# DEPRECATED:
5388# List of url schemes that can be directly accessed.
5389#
5390# This option specifies a list of url schemes that can be downloaded
5391# directly via the direct_url. This direct_URL can be fetched from
5392# Image metadata which can be used by nova to get the
5393# image more efficiently. nova-compute could benefit from this by
5394# invoking a copy when it has access to the same file system as
5395# glance.
5396#
5397# Possible values:
5398#
5399# * [file], Empty list (default)
5400# (list value)
5401# This option is deprecated for removal since 17.0.0.
5402# Its value may be silently ignored in the future.
5403# Reason:
5404# This was originally added for the 'nova.image.download.file'
5405# FileTransfer
5406# extension which was removed in the 16.0.0 Pike release. The
5407# 'nova.image.download.modules' extension point is not maintained
5408# and there is no indication of its use in production clouds.
5409#allowed_direct_url_schemes =
5410
5411#
5412# Enable image signature verification.
5413#
5414# nova uses the image signature metadata from glance and verifies the
5415# signature
5416# of a signed image while downloading that image. If the image
5417# signature cannot
5418# be verified or if the image signature metadata is either incomplete
5419# or
5420# unavailable, then nova will not boot the image and instead will
5421# place the
5422# instance into an error state. This provides end users with stronger
5423# assurances
5424# of the integrity of the image data they are using to create servers.
5425#
5426# Related options:
5427#
5428# * The options in the `key_manager` group, as the key_manager is used
5429# for the signature validation.
5430# * Both enable_certificate_validation and
5431# default_trusted_certificate_ids
5432# below depend on this option being enabled.
5433# (boolean value)
5434{%- if compute.get('image', {}).verify_glance_signatures is defined %}
5435verify_glance_signatures={{ compute.image.verify_glance_signatures }}
5436{%- elif compute.get('barbican', {}).get('enabled', False) %}
5437verify_glance_signatures=true
5438{%- else %}
5439#verify_glance_signatures=false
5440{%- endif %}
5441
5442# DEPRECATED:
5443# Enable certificate validation for image signature verification.
5444#
5445# During image signature verification nova will first verify the
5446# validity of the
5447# image's signing certificate using the set of trusted certificates
5448# associated
5449# with the instance. If certificate validation fails, signature
5450# verification
5451# will not be performed and the image will be placed into an error
5452# state. This
5453# provides end users with stronger assurances that the image data is
5454# unmodified
5455# and trustworthy. If left disabled, image signature verification can
5456# still
5457# occur but the end user will not have any assurance that the signing
5458# certificate used to generate the image signature is still
5459# trustworthy.
5460#
5461# Related options:
5462#
5463# * This option only takes effect if verify_glance_signatures is
5464# enabled.
5465# * The value of default_trusted_certificate_ids may be used when this
5466# option
5467# is enabled.
5468# (boolean value)
5469# This option is deprecated for removal since 16.0.0.
5470# Its value may be silently ignored in the future.
5471# Reason:
5472# This option is intended to ease the transition for deployments
5473# leveraging
5474# image signature verification. The intended state long-term is for
5475# signature
5476# verification and certificate validation to always happen together.
5477#enable_certificate_validation = false
5478
5479#
5480# List of certificate IDs for certificates that should be trusted.
5481#
5482# May be used as a default list of trusted certificate IDs for
5483# certificate
5484# validation. The value of this option will be ignored if the user
5485# provides a
5486# list of trusted certificate IDs with an instance API request. The
5487# value of
5488# this option will be persisted with the instance data if signature
5489# verification
5490# and certificate validation are enabled and if the user did not
5491# provide an
5492# alternative list. If left empty when certificate validation is
5493# enabled the
5494# user must provide a list of trusted certificate IDs otherwise
5495# certificate
5496# validation will fail.
5497#
5498# Related options:
5499#
5500# * The value of this option may be used if both
5501# verify_glance_signatures and
5502# enable_certificate_validation are enabled.
5503# (list value)
5504#default_trusted_certificate_ids =
5505
5506# Enable or disable debug logging with glanceclient. (boolean value)
5507#debug = false
5508
5509# PEM encoded Certificate Authority to use when verifying HTTPs
5510# connections. (string value)
5511#cafile = <None>
5512
5513# PEM encoded client certificate cert file (string value)
5514#certfile = <None>
5515
5516# PEM encoded client certificate key file (string value)
5517#keyfile = <None>
5518
5519# Verify HTTPS connections. (boolean value)
5520#insecure = false
5521
5522# Timeout value for http requests (integer value)
5523#timeout = <None>
5524
5525# The default service_type for endpoint URL discovery. (string value)
5526#service_type = image
5527
5528# The default service_name for endpoint URL discovery. (string value)
5529#service_name = <None>
5530
5531# List of interfaces, in order of preference, for endpoint URL. (list
5532# value)
5533#valid_interfaces = internal,public
5534
5535# The default region_name for endpoint URL discovery. (string value)
5536#region_name = <None>
5537
5538# Always use this endpoint URL for requests for this client. NOTE: The
5539# unversioned endpoint should be specified here; to request a
5540# particular API version, use the `version`, `min-version`, and/or
5541# `max-version` options. (string value)
5542#endpoint_override = <None>
5543
5544
5545[guestfs]
5546#
5547# libguestfs is a set of tools for accessing and modifying virtual
5548# machine (VM) disk images. You can use this for viewing and editing
5549# files inside guests, scripting changes to VMs, monitoring disk
5550# used/free statistics, creating guests, P2V, V2V, performing backups,
5551# cloning VMs, building VMs, formatting disks and resizing disks.
5552
5553#
5554# From nova.conf
5555#
5556
5557#
5558# Enables/disables guestfs logging.
5559#
5560# This configures guestfs to emit debug messages and push them to the
5561# OpenStack logging system. When set to True, it traces libguestfs API
5562# calls and enables verbose debug messages. In order to use this
5563# feature, the "libguestfs" package must be installed.
5564#
5565# Related options:
5566# Since libguestfs accesses and modifies VMs managed by libvirt, the
5567# options below should be set to give access to those VMs.
5569# * libvirt.inject_key
5570# * libvirt.inject_partition
5571# * libvirt.inject_password
5572# (boolean value)
5573#debug = false
5574
5575
5576[hyperv]
5577#
5578# The hyperv feature allows you to configure the Hyper-V hypervisor
5579# driver to be used within an OpenStack deployment.
5580
5581#
5582# From nova.conf
5583#
5584
5585#
5586# Dynamic memory ratio
5587#
5588# Enables dynamic memory allocation (ballooning) when set to a value
5589# greater than 1. The value expresses the ratio between the total RAM
5590# assigned to an instance and its startup RAM amount. For example a
5591# ratio of 2.0 for an instance with 1024MB of RAM implies 512MB of
5592# RAM allocated at startup.
5593#
5594# Possible values:
5595#
5596# * 1.0: Disables dynamic memory allocation (Default).
5597# * Float values greater than 1.0: Enables allocation of total implied
5598# RAM divided by this value for startup.
5599# (floating point value)
5600#dynamic_memory_ratio = 1.0
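#
# Worked example based on the description above: with
#
#   dynamic_memory_ratio = 2.0
#
# an instance whose flavor assigns 2048MB of RAM would be started with
# roughly 1024MB and allowed to balloon up towards the full 2048MB.
#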
5601
5602#
5603# Enable instance metrics collection
5604#
5605# Enables metrics collections for an instance by using Hyper-V's
5606# metric APIs. Collected data can be retrieved by other apps and
5607# services, e.g.: Ceilometer.
5608# (boolean value)
5609#enable_instance_metrics_collection = false
5610
5611#
5612# Instances path share
5613#
5614# The name of a Windows share mapped to the "instances_path" dir
5615# and used by the resize feature to copy files to the target host.
5616# If left blank, an administrative share (hidden network share) will
5617# be used, looking for the same "instances_path" used locally.
5618#
5619# Possible values:
5620#
5621# * "": An administrative share will be used (Default).
5622# * Name of a Windows share.
5623#
5624# Related options:
5625#
5626# * "instances_path": The directory which will be used if this option
5627# here is left blank.
5628# (string value)
5629#instances_path_share =
5630
5631#
5632# Limit CPU features
5633#
5634# This flag is needed to support live migration to hosts with
5635# different CPU features and checked during instance creation
5636# in order to limit the CPU features used by the instance.
5637# (boolean value)
5638#limit_cpu_features = false
5639
5640#
5641# Mounted disk query retry count
5642#
5643# The number of times to retry checking for a mounted disk.
5644# The query runs until the device can be found or the retry
5645# count is reached.
5646#
5647# Possible values:
5648#
5649# * Positive integer values. Values greater than 1 are recommended
5650# (Default: 10).
5651#
5652# Related options:
5653#
5654# * Time interval between disk mount retries is declared with
5655# "mounted_disk_query_retry_interval" option.
5656# (integer value)
5657# Minimum value: 0
5658#mounted_disk_query_retry_count = 10
5659
5660#
5661# Mounted disk query retry interval
5662#
5663# Interval between checks for a mounted disk, in seconds.
5664#
5665# Possible values:
5666#
5667# * Time in seconds (Default: 5).
5668#
5669# Related options:
5670#
5671# * This option is meaningful when the mounted_disk_query_retry_count
5672# is greater than 1.
5673# * The retry loop runs with mounted_disk_query_retry_count and
5674# mounted_disk_query_retry_interval configuration options.
5675# (integer value)
5676# Minimum value: 0
5677#mounted_disk_query_retry_interval = 5
5678
5679#
5680# Power state check timeframe
5681#
5682# The timeframe to be checked for instance power state changes.
5683# This option is used to fetch the state of the instance from Hyper-V
5684# through the WMI interface, within the specified timeframe.
5685#
5686# Possible values:
5687#
5688# * Timeframe in seconds (Default: 60).
5689# (integer value)
5690# Minimum value: 0
5691#power_state_check_timeframe = 60
5692
5693#
5694# Power state event polling interval
5695#
5696# Instance power state change event polling frequency. Sets the
5697# listener interval for power state events to the given value.
5698# This option enhances the internal lifecycle notifications of
5699# instances that reboot themselves. It is unlikely that an operator
5700# has to change this value.
5701#
5702# Possible values:
5703#
5704# * Time in seconds (Default: 2).
5705# (integer value)
5706# Minimum value: 0
5707#power_state_event_polling_interval = 2
5708
5709#
5710# qemu-img command
5711#
5712# qemu-img is required for some of the image related operations
5713# like converting between different image types. You can get it
5714# from here: (http://qemu.weilnetz.de/) or you can install the
5715# Cloudbase OpenStack Hyper-V Compute Driver
5716# (https://cloudbase.it/openstack-hyperv-driver/) which automatically
5717# sets the proper path for this config option. You can either give the
5718# full path of qemu-img.exe or set its path in the PATH environment
5719# variable and leave this option to the default value.
5720#
5721# Possible values:
5722#
5723# * Name of the qemu-img executable, in case it is in the same
5724# directory as the nova-compute service or its path is in the
5725# PATH environment variable (Default).
5726# * Path of qemu-img command (DRIVELETTER:\PATH\TO\QEMU-IMG\COMMAND).
5727#
5728# Related options:
5729#
5730# * If the config_drive_cdrom option is False, qemu-img will be used
5731# to
5732# convert the ISO to a VHD, otherwise the configuration drive will
5733# remain an ISO. To use configuration drive with Hyper-V, you must
5734# set the mkisofs_cmd value to the full path to an mkisofs.exe
5735# installation.
5736# (string value)
5737#qemu_img_cmd = qemu-img.exe
5738
5739#
5740# External virtual switch name
5741#
5742# The Hyper-V Virtual Switch is a software-based layer-2 Ethernet
5743# network switch that is available with the installation of the
5744# Hyper-V server role. The switch includes programmatically managed
5745# and extensible capabilities to connect virtual machines to both
5746# virtual networks and the physical network. In addition, Hyper-V
5747# Virtual Switch provides policy enforcement for security, isolation,
5748# and service levels. The vSwitch represented by this config option
5749# must be an external one (not internal or private).
5750#
5751# Possible values:
5752#
5753# * If not provided, the first of a list of available vswitches
5754# is used. This list is queried using WQL.
5755# * Virtual switch name.
5756# (string value)
5757#vswitch_name = <None>
5758
5759#
5760# Wait soft reboot seconds
5761#
5762# Number of seconds to wait for instance to shut down after soft
5763# reboot request is made. We fall back to hard reboot if instance
5764# does not shutdown within this window.
5765#
5766# Possible values:
5767#
5768# * Time in seconds (Default: 60).
5769# (integer value)
5770# Minimum value: 0
5771#wait_soft_reboot_seconds = 60
5772
5773#
5774# Configuration drive cdrom
5775#
5776# OpenStack can be configured to write instance metadata to
5777# a configuration drive, which is then attached to the
5778# instance before it boots. The configuration drive can be
5779# attached as a disk drive (default) or as a CD drive.
5780#
5781# Possible values:
5782#
5783# * True: Attach the configuration drive image as a CD drive.
5784# * False: Attach the configuration drive image as a disk drive
5785# (Default).
5786#
5787# Related options:
5788#
5789# * This option is meaningful with force_config_drive option set to
5790# 'True'
5791# or when the REST API call to create an instance will have
5792# '--config-drive=True' flag.
5793# * config_drive_format option must be set to 'iso9660' in order to
5794# use
5795# CD drive as the configuration drive image.
5796# * To use configuration drive with Hyper-V, you must set the
5797# mkisofs_cmd value to the full path to an mkisofs.exe installation.
5798# Additionally, you must set the qemu_img_cmd value to the full path
5799# to a qemu-img command installation.
5800# * You can configure the Compute service to always create a
5801# configuration
5802# drive by setting the force_config_drive option to 'True'.
5803# (boolean value)
5804#config_drive_cdrom = false
5805config_drive_cdrom = {{ compute.get('config_drive', {}).get('cdrom', False)|lower }}
5806
5807#
5808# Configuration drive inject password
5809#
5810# Enables setting the admin password in the configuration drive image.
5811#
5812# Related options:
5813#
5814# * This option is meaningful when used with other options that enable
5815# configuration drive usage with Hyper-V, such as
5816# force_config_drive.
5817# * Currently, the only accepted config_drive_format is 'iso9660'.
5818# (boolean value)
5819#config_drive_inject_password = false
5820config_drive_inject_password = {{ compute.get('config_drive', {}).get('inject_password', False)|lower }}
5821
5822#
5823# Volume attach retry count
5824#
5825# The number of times to retry attaching a volume. Volume attachment
5826# is retried until success or the given retry count is reached.
5827#
5828# Possible values:
5829#
5830# * Positive integer values (Default: 10).
5831#
5832# Related options:
5833#
5834# * Time interval between attachment attempts is declared with
5835# volume_attach_retry_interval option.
5836# (integer value)
5837# Minimum value: 0
5838#volume_attach_retry_count = 10
5839
5840#
5841# Volume attach retry interval
5842#
5843# Interval between volume attachment attempts, in seconds.
5844#
5845# Possible values:
5846#
5847# * Time in seconds (Default: 5).
5848#
5849# Related options:
5850#
5851# * This option is meaningful when volume_attach_retry_count
5852# is greater than 1.
5853# * The retry loop runs with volume_attach_retry_count and
5854# volume_attach_retry_interval configuration options.
5855# (integer value)
5856# Minimum value: 0
5857#volume_attach_retry_interval = 5
5858
5859#
5860# Enable RemoteFX feature
5861#
5862# This requires at least one DirectX 11 capable graphics adapter for
5863# Windows / Hyper-V Server 2012 R2 or newer, and the RDS-Virtualization
5864# feature has to be enabled.
5865#
5866# Instances with RemoteFX can be requested with the following flavor
5867# extra specs:
5868#
5869# **os:resolution**. Guest VM screen resolution size. Acceptable
5870# values::
5871#
5872# 1024x768, 1280x1024, 1600x1200, 1920x1200, 2560x1600, 3840x2160
5873#
5874# ``3840x2160`` is only available on Windows / Hyper-V Server 2016.
5875#
5876# **os:monitors**. Guest VM number of monitors. Acceptable values::
5877#
5878# [1, 4] - Windows / Hyper-V Server 2012 R2
5879# [1, 8] - Windows / Hyper-V Server 2016
5880#
5881# **os:vram**. Guest VM VRAM amount. Only available on
5882# Windows / Hyper-V Server 2016. Acceptable values::
5883#
5884# 64, 128, 256, 512, 1024
5885# (boolean value)
5886#enable_remotefx = false
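#
# Illustrative example (hypothetical flavor): with RemoteFX enabled here
# (enable_remotefx = true), a flavor could request it through extra specs
# such as, for instance:
#
#   os:resolution = 1920x1200
#   os:monitors = 2
#   os:vram = 512
#
# set on the flavor (for example with 'openstack flavor set <flavor>
# --property os:resolution=1920x1200').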
5887
5888#
5889# Use multipath connections when attaching iSCSI or FC disks.
5890#
5891# This requires the Multipath IO Windows feature to be enabled. MPIO
5892# must be
5893# configured to claim such devices.
5894# (boolean value)
5895#use_multipath_io = false
5896
5897#
5898# List of iSCSI initiators that will be used for establishing iSCSI
5899# sessions.
5900#
5901# If none are specified, the Microsoft iSCSI initiator service will
5902# choose the
5903# initiator.
5904# (list value)
5905#iscsi_initiator_list =
5906
5907{% if compute.ironic is defined -%}
5908[ironic]
5909#
5910# Configuration options for Ironic driver (Bare Metal).
5911# If using the Ironic driver, the following options must be set:
5912# * auth_type
5913# * auth_url
5914# * project_name
5915# * username
5916# * password
5917# * project_domain_id or project_domain_name
5918# * user_domain_id or user_domain_name
5919
5920#
5921# From nova.conf
5922#
5923
5924# DEPRECATED: URL override for the Ironic API endpoint. (uri value)
5925# This option is deprecated for removal.
5926# Its value may be silently ignored in the future.
5927# Reason: Endpoint lookup uses the service catalog via common
5928# keystoneauth1 Adapter configuration options. In the current release,
5929# api_endpoint will override this behavior, but will be ignored and/or
5930# removed in a future release. To achieve the same result, use the
5931# endpoint_override option instead.
5932#api_endpoint = http://ironic.example.org:6385/
5933api_endpoint={{ compute.ironic.get('protocol', 'http') }}://{{ compute.ironic.host }}:{{ compute.ironic.port }}
5934
5935#
5936# The number of times to retry when a request conflicts.
5937# If set to 0, only try once, no retries.
5938#
5939# Related options:
5940#
5941# * api_retry_interval
5942# (integer value)
5943# Minimum value: 0
5944#api_max_retries = 60
5945
5946#
5947# The number of seconds to wait before retrying the request.
5948#
5949# Related options:
5950#
5951# * api_max_retries
5952# (integer value)
5953# Minimum value: 0
5954#api_retry_interval = 2
5955
5956# Timeout (seconds) to wait for node serial console state changed. Set
5957# to 0 to disable timeout. (integer value)
5958# Minimum value: 0
5959#serial_console_state_timeout = 10
5960
5961# PEM encoded Certificate Authority to use when verifying HTTPs
5962# connections. (string value)
5963#cafile = <None>
5964{%- if compute.ironic.get('protocol', 'http') == 'https' %}
5965cafile={{ compute.identity.get('cacert_file', compute.cacert_file) }}
5966{%- endif %}
5967
5968# PEM encoded client certificate cert file (string value)
5969#certfile = <None>
5970
5971# PEM encoded client certificate key file (string value)
5972#keyfile = <None>
5973
5974# Verify HTTPS connections. (boolean value)
5975#insecure = false
5976
5977# Timeout value for http requests (integer value)
5978#timeout = <None>
5979
5980# Authentication type to load (string value)
5981# Deprecated group/name - [ironic]/auth_plugin
5982#auth_type = <None>
5983auth_type={{ compute.ironic.auth_type }}
5984
5985# Config Section from which to load plugin specific options (string
5986# value)
5987#auth_section = <None>
5988
5989# Authentication URL (string value)
5990#auth_url = <None>
5991auth_url={{ compute.identity.get('protocol', 'http') }}://{{ compute.identity.host }}:{{ compute.identity.port }}/v3
5992
5993# Scope for system operations (string value)
5994#system_scope = <None>
5995
5996# Domain ID to scope to (string value)
5997#domain_id = <None>
5998
5999# Domain name to scope to (string value)
6000#domain_name = <None>
6001
6002# Project ID to scope to (string value)
6003#project_id = <None>
6004
6005# Project name to scope to (string value)
6006#project_name = <None>
6007project_name={{ compute.identity.tenant }}
6008
6009# Domain ID containing project (string value)
6010#project_domain_id = <None>
6011
6012# Domain name containing project (string value)
6013#project_domain_name = <None>
6014project_domain_name={{ compute.ironic.project_domain_name }}
6015
6016# Trust ID (string value)
6017#trust_id = <None>
6018
6019# User ID (string value)
6020#user_id = <None>
6021
6022# Username (string value)
6023# Deprecated group/name - [ironic]/user_name
6024#username = <None>
6025username={{ compute.ironic.user }}
6026
6027# User's domain id (string value)
6028#user_domain_id = <None>
6029
6030# User's domain name (string value)
6031#user_domain_name = <None>
6032user_domain_name={{ compute.ironic.user_domain_name }}
6033
6034
6035# User's password (string value)
6036#password = <None>
6037password={{ compute.ironic.password }}
6038# The default service_type for endpoint URL discovery. (string value)
6039#service_type = baremetal
6040
6041# The default service_name for endpoint URL discovery. (string value)
6042#service_name = <None>
6043
6044# List of interfaces, in order of preference, for endpoint URL. (list
6045# value)
6046#valid_interfaces = internal,public
6047
6048# The default region_name for endpoint URL discovery. (string value)
6049#region_name = <None>
6050
6051# Always use this endpoint URL for requests for this client. NOTE: The
6052# unversioned endpoint should be specified here; to request a
6053# particular API version, use the `version`, `min-version`, and/or
6054# `max-version` options. (string value)
6055# Deprecated group/name - [ironic]/api_endpoint
6056#endpoint_override = <None>
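#
# For reference, with illustrative pillar values (compute:ironic pointing
# at 10.0.0.20:6385 with password auth, and compute:identity pointing at
# 10.0.0.10:35357 with tenant 'service'), the template lines above would
# render approximately:
#
#   api_endpoint=http://10.0.0.20:6385
#   auth_type=password
#   auth_url=http://10.0.0.10:35357/v3
#   project_name=service
#   username=ironic
#   password=...
#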
6057{%- endif %}
6058
6059
6060[key_manager]
6061
6062#
6063# From nova.conf
6064#
6065
6066#
6067# Fixed key returned by key manager, specified in hex.
6068#
6069# Possible values:
6070#
6071# * Empty string or a key in hex value
6072# (string value)
6073#fixed_key = <None>
6074{%- if compute.get('barbican', {}).get('enabled', False) %}
6075api_class=castellan.key_manager.barbican_key_manager.BarbicanKeyManager
6076{%- endif %}
6077
6078# Specify the key manager implementation. Options are "barbican" and
6079# "vault". Default is "barbican". Will support the values earlier
6080# set using [key_manager]/api_class for some time. (string value)
6081# Deprecated group/name - [key_manager]/api_class
6082#backend = barbican
6083
6084# The type of authentication credential to create. Possible values are
6085# 'token', 'password', 'keystone_token', and 'keystone_password'.
6086# Required if no context is passed to the credential factory. (string
6087# value)
6088#auth_type = <None>
6089
6090# Token for authentication. Required for 'token' and 'keystone_token'
6091# auth_type if no context is passed to the credential factory. (string
6092# value)
6093#token = <None>
6094
6095# Username for authentication. Required for 'password' auth_type.
6096# Optional for the 'keystone_password' auth_type. (string value)
6097#username = <None>
6098
6099# Password for authentication. Required for 'password' and
6100# 'keystone_password' auth_type. (string value)
6101#password = <None>
6102
6103# Use this endpoint to connect to Keystone. (string value)
6104#auth_url = <None>
6105
6106# User ID for authentication. Optional for 'keystone_token' and
6107# 'keystone_password' auth_type. (string value)
6108#user_id = <None>
6109
6110# User's domain ID for authentication. Optional for 'keystone_token'
6111# and 'keystone_password' auth_type. (string value)
6112#user_domain_id = <None>
6113
6114# User's domain name for authentication. Optional for 'keystone_token'
6115# and 'keystone_password' auth_type. (string value)
6116#user_domain_name = <None>
6117
6118# Trust ID for trust scoping. Optional for 'keystone_token' and
6119# 'keystone_password' auth_type. (string value)
6120#trust_id = <None>
6121
6122# Domain ID for domain scoping. Optional for 'keystone_token' and
6123# 'keystone_password' auth_type. (string value)
6124#domain_id = <None>
6125
6126# Domain name for domain scoping. Optional for 'keystone_token' and
6127# 'keystone_password' auth_type. (string value)
6128#domain_name = <None>
6129
6130# Project ID for project scoping. Optional for 'keystone_token' and
6131# 'keystone_password' auth_type. (string value)
6132#project_id = <None>
6133
6134# Project name for project scoping. Optional for 'keystone_token' and
6135# 'keystone_password' auth_type. (string value)
6136#project_name = <None>
6137
6138# Project's domain ID for project. Optional for 'keystone_token' and
6139# 'keystone_password' auth_type. (string value)
6140#project_domain_id = <None>
6141
6142# Project's domain name for project. Optional for 'keystone_token' and
6143# 'keystone_password' auth_type. (string value)
6144#project_domain_name = <None>
6145
6146# Allow fetching a new token if the current one is going to expire.
6147# Optional for 'keystone_token' and 'keystone_password' auth_type.
6148# (boolean value)
6149#reauthenticate = true
6150
6151
6152[keystone]
6153# Configuration options for the identity service
6154
6155#
6156# From nova.conf
6157#
6158
6159# PEM encoded Certificate Authority to use when verifying HTTPs
6160# connections. (string value)
6161#cafile = <None>
6162
6163# PEM encoded client certificate cert file (string value)
6164#certfile = <None>
6165
6166# PEM encoded client certificate key file (string value)
6167#keyfile = <None>
6168
6169# Verify HTTPS connections. (boolean value)
6170#insecure = false
6171
6172# Timeout value for http requests (integer value)
6173#timeout = <None>
6174
6175# The default service_type for endpoint URL discovery. (string value)
6176#service_type = identity
6177
6178# The default service_name for endpoint URL discovery. (string value)
6179#service_name = <None>
6180
6181# List of interfaces, in order of preference, for endpoint URL. (list
6182# value)
6183#valid_interfaces = internal,public
6184
6185# The default region_name for endpoint URL discovery. (string value)
6186#region_name = <None>
6187
6188# Always use this endpoint URL for requests for this client. NOTE: The
6189# unversioned endpoint should be specified here; to request a
6190# particular API version, use the `version`, `min-version`, and/or
6191# `max-version` options. (string value)
6192#endpoint_override = <None>
6193
6194
6195[libvirt]
6196#
6197# Libvirt options allow the cloud administrator to configure the
6198# libvirt hypervisor driver to be used within an OpenStack deployment.
6199#
6200# Almost all of the libvirt config options are influenced by the
6201# ``virt_type`` config option, which describes the virtualization type
6202# (or so-called domain type) libvirt should use for specific features
6203# such as live migration and snapshots.
6205
6206#
6207# From nova.conf
6208#
6209{%- if compute.libvirt.virt_type is defined %}
6210virt_type = {{ compute.libvirt.virt_type }}
6211{%- else %}
6212virt_type = kvm
6213{%- endif%}
6214
6215inject_partition={{ compute.libvirt.inject_partition }}
6216{%- if compute.libvirt.get('inject_partition', '-2')|string == '-2' %}
6217inject_password=False
6218{%- else %}
6219inject_password={{ compute.libvirt.inject_password }}
6220{%- endif %}
6221
6222disk_cachemodes="{{ compute.get('disk_cachemodes', 'network=writeback,block=none') }}"
6223block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_NON_SHARED_INC
6224live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST
6225inject_key=True
6226vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
6227
6228{%- if compute.get('ceph', {}).get('ephemeral', False) %}
6229images_type=rbd
6230images_rbd_pool={{ compute.ceph.rbd_pool }}
6231images_rbd_ceph_conf=/etc/ceph/ceph.conf
6232rbd_user={{ compute.ceph.rbd_user }}
6233rbd_secret_uuid={{ compute.ceph.secret_uuid }}
6234inject_password=false
6235inject_key=false
6236{%- elif compute.get('lvm', {}).get('ephemeral', False) %}
6237images_type=lvm
6238images_volume_group={{ compute.lvm.images_volume_group }}
6239{%- if compute.lvm.volume_clear is defined %}
6240volume_clear={{ compute.lvm.volume_clear }}
6241{%- endif %}
6242{%- if compute.lvm.volume_clear_size is defined %}
6243volume_clear_size={{ compute.lvm.volume_clear_size }}
6244{%- endif %}
6245{%- endif %}
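#
# For reference, with Ceph-backed ephemeral storage enabled in the compute
# pillar (illustrative pool, user and secret values), the block above would
# render approximately:
#
#   images_type=rbd
#   images_rbd_pool=vms
#   images_rbd_ceph_conf=/etc/ceph/ceph.conf
#   rbd_user=nova
#   rbd_secret_uuid=457eb676-33da-42ec-9a8c-9293d545c337
#   inject_password=false
#   inject_key=false
#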
6246
6247#
6248# The ID of the image to boot from to rescue data from a corrupted
6249# instance.
6250#
6251# If the rescue REST API operation doesn't provide an ID of an image
6252# to
6253# use, the image which is referenced by this ID is used. If this
6254# option is not set, the image from the instance is used.
6255#
6256# Possible values:
6257#
6258# * An ID of an image or nothing. If it points to an *Amazon Machine
6259# Image* (AMI), consider to set the config options
6260# ``rescue_kernel_id``
6261# and ``rescue_ramdisk_id`` too. If nothing is set, the image of the
6262# instance
6263# is used.
6264#
6265# Related options:
6266#
6267# * ``rescue_kernel_id``: If the chosen rescue image allows the
6268# separate
6269# definition of its kernel disk, the value of this option is used,
6270# if specified. This is the case when *Amazon*'s AMI/AKI/ARI image
6271# format is used for the rescue image.
6272# * ``rescue_ramdisk_id``: If the chosen rescue image allows the
6273# separate
6274# definition of its RAM disk, the value of this option is used if,
6275# specified. This is the case when *Amazon*'s AMI/AKI/ARI image
6276# format is used for the rescue image.
6277# (string value)
6278#rescue_image_id = <None>
6279
6280#
6281# The ID of the kernel (AKI) image to use with the rescue image.
6282#
6283# If the chosen rescue image allows the separate definition of its
6284# kernel
6285# disk, the value of this option is used, if specified. This is the
6286# case
6287# when *Amazon*'s AMI/AKI/ARI image format is used for the rescue
6288# image.
6289#
6290# Possible values:
6291#
6292# * An ID of an kernel image or nothing. If nothing is specified, the
6293# kernel
6294# disk from the instance is used if it was launched with one.
6295#
6296# Related options:
6297#
6298# * ``rescue_image_id``: If that option points to an image in
6299# *Amazon*'s
6300# AMI/AKI/ARI image format, it's useful to use ``rescue_kernel_id``
6301# too.
6302# (string value)
6303#rescue_kernel_id = <None>
6304
6305#
6306# The ID of the RAM disk (ARI) image to use with the rescue image.
6307#
6308# If the chosen rescue image allows the separate definition of its RAM
6309# disk, the value of this option is used, if specified. This is the
6310# case
6311# when *Amazon*'s AMI/AKI/ARI image format is used for the rescue
6312# image.
6313#
6314# Possible values:
6315#
6316# * An ID of a RAM disk image or nothing. If nothing is specified, the
6317# RAM
6318# disk from the instance is used if it was launched with one.
6319#
6320# Related options:
6321#
6322# * ``rescue_image_id``: If that option points to an image in
6323# *Amazon*'s
6324# AMI/AKI/ARI image format, it's useful to use ``rescue_ramdisk_id``
6325# too.
6326# (string value)
6327#rescue_ramdisk_id = <None>
6328
6329#
6330# Describes the virtualization type (or so called domain type) libvirt
6331# should
6332# use.
6333#
6334# The choice of this type must match the underlying virtualization
6335# strategy
6336# you have chosen for this host.
6337#
6338# Possible values:
6339#
6340# * See the predefined set of case-sensitive values.
6341#
6342# Related options:
6343#
6344# * ``connection_uri``: depends on this
6345# * ``disk_prefix``: depends on this
6346# * ``cpu_mode``: depends on this
6347# * ``cpu_model``: depends on this
6348# (string value)
6349# Possible values:
6350# kvm - <No description provided>
6351# lxc - <No description provided>
6352# qemu - <No description provided>
6353# uml - <No description provided>
6354# xen - <No description provided>
6355# parallels - <No description provided>
6356#virt_type = kvm
6357
6358#
6359# Overrides the default libvirt URI of the chosen virtualization type.
6360#
6361# If set, Nova will use this URI to connect to libvirt.
6362#
6363# Possible values:
6364#
6365# * An URI like ``qemu:///system`` or ``xen+ssh://oirase/`` for
6366# example.
6367# This is only necessary if the URI differs from the commonly known
6368# URIs
6369# for the chosen virtualization type.
6370#
6371# Related options:
6372#
6373# * ``virt_type``: Influences what is used as default value here.
6374# (string value)
6375#connection_uri =
6376{%- if compute.get('libvirt', {}).uri is defined %}
6377connection_uri={{ compute.libvirt.uri }}
6378{%- endif %}
6379
6380#
6381# Algorithm used to hash the injected password.
6382# Note that it must be supported by libc on the compute host
6383# _and_ by libc inside *any guest image* that will be booted by this
6384# compute
6385# host with requested password injection.
6386# In case the specified algorithm is not supported by libc on the
6387# compute host,
6388# a fallback to DES algorithm will be performed.
6389#
6390# Related options:
6391#
6392# * ``inject_password``
6393# * ``inject_partition``
6394# (string value)
6395# Possible values:
6396# SHA-512 - <No description provided>
6397# SHA-256 - <No description provided>
6398# MD5 - <No description provided>
6399#inject_password_algorithm = MD5
6400
6401#
6402# Allow the injection of an admin password for instance only at
6403# ``create`` and
6404# ``rebuild`` process.
6405#
6406# There is no agent needed within the image to do this. If
6407# *libguestfs* is
6408# available on the host, it will be used. Otherwise *nbd* is used. The
6409# file
6410# system of the image will be mounted and the admin password, which is
6411# provided
6412# in the REST API call will be injected as password for the root user.
6413# If no
6414# root user is available, the instance won't be launched and an error
6415# is thrown.
6416# Be aware that the injection is *not* possible when the instance gets
6417# launched
6418# from a volume.
6419#
6420# Possible values:
6421#
6422# * True: Allows the injection.
6423# * False (default): Disallows the injection. Any via the REST API
6424# provided
6425# admin password will be silently ignored.
6426#
6427# Related options:
6428#
6429# * ``inject_partition``: That option will decide about the discovery
6430# and usage
6431# of the file system. It also can disable the injection at all.
6432# (boolean value)
6433#inject_password = false
6434
6435#
6436# Allow the injection of an SSH key at boot time.
6437#
6438# There is no agent needed within the image to do this. If
6439# *libguestfs* is
6440# available on the host, it will be used. Otherwise *nbd* is used. The
6441# file
6442# system of the image will be mounted and the SSH key, which is
6443# provided
6444# in the REST API call will be injected as SSH key for the root user
6445# and
6446# appended to the ``authorized_keys`` of that user. The SELinux
6447# context will
6448# be set if necessary. Be aware that the injection is *not* possible
6449# when the
6450# instance gets launched from a volume.
6451#
6452# This config option will enable directly modifying the instance disk
6453# and does
6454# not affect what cloud-init may do using data from config_drive
6455# option or the
6456# metadata service.
6457#
6458# Related options:
6459#
6460# * ``inject_partition``: That option will decide about the discovery
6461# and usage
6462# of the file system. It also can disable the injection at all.
6463# (boolean value)
6464#inject_key = false
6465
6466#
6467# Determines the way how the file system is chosen to inject data into
6468# it.
6469#
6470# *libguestfs* will be used as the first solution to inject data. If that's
6471# not
6472# available on the host, the image will be locally mounted on the host
6473# as a
6474# fallback solution. If libguestfs is not able to determine the root
6475# partition
6476# (because there are more or less than one root partition) or cannot
6477# mount the
6478# file system, it will result in an error and the instance won't boot.
6480#
6481# Possible values:
6482#
6483# * -2 => disable the injection of data.
6484# * -1 => find the root partition with the file system to mount with
6485# libguestfs
6486# * 0 => The image is not partitioned
6487# * >0 => The number of the partition to use for the injection
6488#
6489# Related options:
6490#
6491# * ``inject_key``: If this option allows the injection of a SSH key
6492# it depends
6493# on value greater or equal to -1 for ``inject_partition``.
6494# * ``inject_password``: If this option allows the injection of an
6495# admin password
6496# it depends on value greater or equal to -1 for
6497# ``inject_partition``.
6498# * ``guestfs`` You can enable the debug log level of libguestfs with
6499# this
6500# config option. A more verbose output will help in debugging
6501# issues.
6502# * ``virt_type``: If you use ``lxc`` as virt_type it will be treated
6503# as a
6504# single partition image
6505# (integer value)
6506# Minimum value: -2
6507#inject_partition = -2
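#
# Illustrative example (not from the upstream sample): to have libguestfs
# locate the root partition and inject the SSH key provided via the API,
# one might set:
#
#   inject_partition = -1
#   inject_key = true
#
# Note that this template already manages inject_partition/inject_password
# near the top of the [libvirt] section based on pillar data.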
6508
6509# DEPRECATED:
6510# Enable a mouse cursor within a graphical VNC or SPICE sessions.
6511#
6512# This will only be taken into account if the VM is fully virtualized
6513# and VNC
6514# and/or SPICE is enabled. If the node doesn't support a graphical
6515# framebuffer,
6516# then it is valid to set this to False.
6517#
6518# Related options:
6519# * ``[vnc]enabled``: If VNC is enabled, ``use_usb_tablet`` will have
6520# an effect.
6521# * ``[spice]enabled`` + ``[spice].agent_enabled``: If SPICE is
6522# enabled and the
6523# spice agent is disabled, the config value of ``use_usb_tablet``
6524# will have
6525# an effect.
6526# (boolean value)
6527# This option is deprecated for removal since 14.0.0.
6528# Its value may be silently ignored in the future.
6529# Reason: This option is being replaced by the 'pointer_model' option.
6530#use_usb_tablet = true
6531
6532#
6533# The IP address or hostname to be used as the target for live
6534# migration traffic.
6535#
6536# If this option is set to None, the hostname of the migration target
6537# compute
6538# node will be used.
6539#
6540# This option is useful in environments where the live-migration
6541# traffic can
6542# impact the network plane significantly. A separate network for live-
6543# migration
6544# traffic can then use this config option and avoids the impact on the
6545# management network.
6546#
6547# Possible values:
6548#
6549# * A valid IP address or hostname, else None.
6550#
6551# Related options:
6552#
6553# * ``live_migration_tunnelled``: The live_migration_inbound_addr
6554# value is
6555# ignored if tunneling is enabled.
6556# (string value)
6557#live_migration_inbound_addr = <None>
6558{%- if compute.libvirt.migration_inbound_addr is defined %}
6559live_migration_inbound_addr = {{ compute.libvirt.migration_inbound_addr }}
6560{%- endif %}
6561
6562# DEPRECATED:
6563# Live migration target URI to use.
6564#
6565# Override the default libvirt live migration target URI (which is
6566# dependent
6567# on virt_type). Any included "%s" is replaced with the migration
6568# target
6569# hostname.
6570#
6571# If this option is set to None (which is the default), Nova will
6572# automatically
6573# generate the `live_migration_uri` value based on only 4 supported
6574# `virt_type`
6575# in following list:
6576#
6577# * 'kvm': 'qemu+tcp://%s/system'
6578# * 'qemu': 'qemu+tcp://%s/system'
6579# * 'xen': 'xenmigr://%s/system'
6580# * 'parallels': 'parallels+tcp://%s/system'
6581#
6582# Related options:
6583#
6584# * ``live_migration_inbound_addr``: If
6585# ``live_migration_inbound_addr`` value
6586# is not None and ``live_migration_tunnelled`` is False, the
6587# ip/hostname
6588# address of target compute node is used instead of
6589# ``live_migration_uri`` as
6590# the uri for live migration.
6591# * ``live_migration_scheme``: If ``live_migration_uri`` is not set,
6592# the scheme
6593# used for live migration is taken from ``live_migration_scheme``
6594# instead.
6595# (string value)
6596# This option is deprecated for removal since 15.0.0.
6597# Its value may be silently ignored in the future.
6598# Reason:
6599# live_migration_uri is deprecated for removal in favor of two other
6600# options that
6601# allow to change live migration scheme and target URI:
6602# ``live_migration_scheme``
6603# and ``live_migration_inbound_addr`` respectively.
6604#live_migration_uri = <None>
6605
6606#
6607# URI scheme used for live migration.
6608#
6609# Override the default libvirt live migration scheme (which is
6610# dependent on
6611# virt_type). If this option is set to None, nova will automatically
6612# choose a
6613# sensible default based on the hypervisor. It is not recommended that
6614# you change
6615# this unless you are very sure that the hypervisor supports a particular
6616# scheme.
6617#
6618# Related options:
6619#
6620# * ``virt_type``: This option is meaningful only when ``virt_type``
6621# is set to
6622# `kvm` or `qemu`.
6623# * ``live_migration_uri``: If ``live_migration_uri`` value is not
6624# None, the
6625# scheme used for live migration is taken from
6626# ``live_migration_uri`` instead.
6627# (string value)
6628#live_migration_scheme = <None>
6629
6630{%- if compute.libvirt.tls.get('enabled', False) %}
6631live_migration_scheme="tls"
6632{%- endif %}
6633#
6634# Enable tunnelled migration.
6635#
6636# This option enables the tunnelled migration feature, where migration
6637# data is
6638# transported over the libvirtd connection. If enabled, we use the
6639# VIR_MIGRATE_TUNNELLED migration flag, avoiding the need to configure
6640# the network to allow direct hypervisor to hypervisor communication.
6641# If False, use the native transport. If not set, Nova will choose a
6642# sensible default based on, for example the availability of native
6643# encryption support in the hypervisor. Enabling this option will
6644# definitely
6645# impact performance massively.
6646#
6647# Note that this option is NOT compatible with use of block migration.
6648#
6649# Related options:
6650#
6651# * ``live_migration_inbound_addr``: The live_migration_inbound_addr
6652# value is
6653# ignored if tunneling is enabled.
6654# (boolean value)
6655#live_migration_tunnelled = false
6656{%- if compute.libvirt.live_migration_tunnelled is defined %}
6657live_migration_tunnelled = {{ compute.libvirt.live_migration_tunnelled }}
6658{%- endif %}
6659
6660#
6661# Maximum bandwidth(in MiB/s) to be used during migration.
6662#
6663# If set to 0, the hypervisor will choose a suitable default. Some
6664# hypervisors
6665# do not support this feature and will return an error if bandwidth is
6666# not 0.
6667# Please refer to the libvirt documentation for further details.
6668# (integer value)
6669#live_migration_bandwidth = 0
6670
6671#
6672# Maximum permitted downtime, in milliseconds, for live migration
6673# switchover.
6674#
6675# Will be rounded up to a minimum of 100ms. You can increase this
6676# value
6677# if you want to allow live-migrations to complete faster, or avoid
6678# live-migration timeout errors by allowing the guest to be paused for
6679# longer during the live-migration switch over.
6680#
6681# Related options:
6682#
6683# * live_migration_completion_timeout
6684# (integer value)
6685# Minimum value: 100
6686#live_migration_downtime = 500
6687
6688#
6689# Number of incremental steps to reach max downtime value.
6690#
6691# Will be rounded up to a minimum of 3 steps.
6692# (integer value)
6693# Minimum value: 3
6694#live_migration_downtime_steps = 10
6695
6696#
6697# Time to wait, in seconds, between each step increase of the
6698# migration
6699# downtime.
6700#
6701# Minimum delay is 3 seconds. Value is per GiB of guest RAM + disk to
6702# be
6703# transferred, with lower bound of a minimum of 2 GiB per device.
6704# (integer value)
6705# Minimum value: 3
6706#live_migration_downtime_delay = 75
6707
6708#
6709# Time to wait, in seconds, for migration to successfully complete
6710# transferring
6711# data before aborting the operation.
6712#
6713# Value is per GiB of guest RAM + disk to be transferred, with lower
6714# bound of
6715# a minimum of 2 GiB. Should usually be larger than downtime delay *
6716# downtime
6717# steps. Set to 0 to disable timeouts.
6718#
6719# Related options:
6720#
6721# * live_migration_downtime
6722# * live_migration_downtime_steps
6723# * live_migration_downtime_delay
6724# (integer value)
6725# Note: This option can be changed without restarting.
6726#live_migration_completion_timeout = 800
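#
# Worked example based on the description above: with the default of 800
# seconds per GiB, a guest with 4 GiB of RAM and 16 GiB of disk to
# transfer (20 GiB total) would be allowed roughly 20 * 800 = 16000
# seconds to complete before the migration is aborted.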
6727
6728# DEPRECATED:
6729# Time to wait, in seconds, for migration to make forward progress in
6730# transferring data before aborting the operation.
6731#
6732# Set to 0 to disable timeouts.
6733#
6734# This is deprecated, and now disabled by default because we have
6735# found serious
6736# bugs in this feature that caused false live-migration timeout
6737# failures. This
6738# feature will be removed or replaced in a future release.
6739# (integer value)
6740# Note: This option can be changed without restarting.
6741# This option is deprecated for removal.
6742# Its value may be silently ignored in the future.
6743# Reason: Serious bugs found in this feature.
6744#live_migration_progress_timeout = 0
6745
6746#
6747# This option allows nova to switch an on-going live migration to
6748# post-copy
6749# mode, i.e., switch the active VM to the one on the destination node
6750# before the
6751# migration is complete, therefore ensuring an upper bound on the
6752# memory that
6753# needs to be transferred. Post-copy requires libvirt>=1.3.3 and
6754# QEMU>=2.5.0.
6755#
6756# When permitted, post-copy mode will be automatically activated if a
6757# live-migration memory copy iteration does not make percentage
6758# increase of at
6759# least 10% over the last iteration.
6760#
6761# The live-migration force complete API also uses post-copy when
6762# permitted. If
6763# post-copy mode is not available, force complete falls back to
6764# pausing the VM
6765# to ensure the live-migration operation will complete.
6766#
6767# When using post-copy mode, if the source and destination hosts lose
6768# network
6769# connectivity, the VM being live-migrated will need to be rebooted.
6770# For more
6771# details, please see the Administration guide.
6772#
6773# Related options:
6774#
6775# * live_migration_permit_auto_converge
6776# (boolean value)
6777#live_migration_permit_post_copy = false
6778
6779#
6780# This option allows nova to start live migration with auto converge
6781# on.
6782#
6783# Auto converge throttles down CPU if the progress of an on-going live
6784# migration
6785# is slow. Auto converge will only be used if this flag is set to True
6786# and
6787# post copy is not permitted or post copy is unavailable due to the
6788# version
6789# of libvirt and QEMU in use.
6790#
6791# Related options:
6792#
6793# * live_migration_permit_post_copy
6794# (boolean value)
6795#live_migration_permit_auto_converge = false
6796{%- if compute.libvirt.live_migration_permit_auto_converge is defined %}
6797live_migration_permit_auto_converge={{ compute.libvirt.live_migration_permit_auto_converge|lower }}
6798{%- endif %}
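# A minimal pillar sketch (hypothetical value, assuming this formula's
# nova:compute pillar layout from map.jinja) that renders the option above:
#
#   nova:
#     compute:
#       libvirt:
#         live_migration_permit_auto_converge: true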
6799
6800#
6801# Determine the snapshot image format when sending to the image
6802# service.
6803#
6804# If set, this decides what format is used when sending the snapshot
6805# to the
6806# image service.
6807# If not set, defaults to same type as source image.
6808#
6809# Possible values:
6810#
6811# * ``raw``: RAW disk format
6812# * ``qcow2``: KVM default disk format
6813# * ``vmdk``: VMWare default disk format
6814# * ``vdi``: VirtualBox default disk format
6815# * If not set, defaults to same type as source image.
6816# (string value)
6817# Possible values:
6818# raw - <No description provided>
6819# qcow2 - <No description provided>
6820# vmdk - <No description provided>
6821# vdi - <No description provided>
6822#snapshot_image_format = <None>
6823
6824#
6825# Override the default disk prefix for the devices attached to an
6826# instance.
6827#
6828# If set, this is used to identify a free disk device name for a bus.
6829#
6830# Possible values:
6831#
6832# * Any prefix which will result in a valid disk device name like
6833# 'sda' or 'hda'
6834# for example. This is only necessary if the device names differ from
6835# the
6836# commonly known device name prefixes for a virtualization type such
6837# as: sd,
6838# xvd, uvd, vd.
6839#
6840# Related options:
6841#
6842# * ``virt_type``: Influences which device type is used, which
6843# determines
6844# the default disk prefix.
6845# (string value)
6846#disk_prefix = <None>
6847
6848# Number of seconds to wait for instance to shut down after soft
6849# reboot request is made. We fall back to hard reboot if instance does
6850# not shut down within this window. (integer value)
6851#wait_soft_reboot_seconds = 120
6852
6853#
6854# Is used to set the CPU mode an instance should have.
6855#
6856# If virt_type="kvm|qemu", it will default to "host-model", otherwise
6857# it will
6858# default to "none".
6859#
6860# Possible values:
6861#
6862# * ``host-model``: Clones the host CPU feature flags
6863# * ``host-passthrough``: Use the host CPU model exactly
6864# * ``custom``: Use a named CPU model
6865# * ``none``: Don't set a specific CPU model. For instances with
6866# ``virt_type`` as KVM/QEMU, the default CPU model from QEMU will be
6867# used,
6868# which provides a basic set of CPU features that are compatible with
6869# most
6870# hosts.
6871#
6872# Related options:
6873#
6874# * ``cpu_model``: This should be set ONLY when ``cpu_mode`` is set to
6875# ``custom``. Otherwise, it would result in an error and the instance
6876# launch will fail.
6877#
6878# (string value)
6879# Possible values:
6880# host-model - <No description provided>
6881# host-passthrough - <No description provided>
6882# custom - <No description provided>
6883# none - <No description provided>
Vasyl Saienko4be5cca2018-05-25 16:15:49 +03006884#cpu_mode = <None>
6885cpu_mode = {{ compute.cpu_mode }}
Oleh Hryhorov5d0f13a2018-05-21 17:02:54 +00006886
6887#
6888# Set the name of the libvirt CPU model the instance should use.
6889#
6890# Possible values:
6891#
6892# * The named CPU models listed in ``/usr/share/libvirt/cpu_map.xml``
6893#
6894# Related options:
6895#
6896# * ``cpu_mode``: This should be set to ``custom`` ONLY when you want
6897# to
6898# configure (via ``cpu_model``) a specific named CPU model.
6899# Otherwise, it
6900# would result in an error and the instance launch will fail.
6901#
6902# * ``virt_type``: Only the virtualization types ``kvm`` and ``qemu``
6903# use this.
6904# (string value)
6905#cpu_model = <None>
Vasyl Saienko4be5cca2018-05-25 16:15:49 +03006906{%- if compute.get('libvirt', {}).cpu_model is defined and compute.cpu_mode == 'custom' %}
6907cpu_model = {{ compute.libvirt.cpu_model }}
6908{%- endif %}
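# A minimal pillar sketch (hypothetical values, assuming this formula's
# nova:compute pillar layout) that renders a custom named CPU model; the
# template only emits cpu_model when cpu_mode is set to 'custom':
#
#   nova:
#     compute:
#       cpu_mode: custom
#       libvirt:
#         cpu_model: IvyBridge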
Oleh Hryhorov5d0f13a2018-05-21 17:02:54 +00006909
6910#
6911# This allows specifying granular CPU feature flags when specifying
6912# CPU
6913# models. For example, to explicitly specify the ``pcid``
6914# (Process-Context ID, an Intel processor feature) flag to the
6915# "IvyBridge"
6916# virtual CPU model::
6917#
6918# [libvirt]
6919# cpu_mode = custom
6920# cpu_model = IvyBridge
6921# cpu_model_extra_flags = pcid
6922#
6923# Currently, the choice is restricted to only one option: ``pcid``
6924# (the
6925# option is case-insensitive, so ``PCID`` is also valid). This flag
6926# is
6927# now required to address the guest performance degradation as a
6928# result of
6929# applying the "Meltdown" CVE fixes on certain Intel CPU models.
6930#
6931# Note that when using this config attribute to set the 'PCID' CPU
6932# flag,
6933# not all virtual (i.e. libvirt / QEMU) CPU models need it:
6934#
6935# * The only virtual CPU models that include the 'PCID' capability are
6936# Intel "Haswell", "Broadwell", and "Skylake" variants.
6937#
6938# * The libvirt / QEMU CPU models "Nehalem", "Westmere",
6939# "SandyBridge",
6940# and "IvyBridge" will _not_ expose the 'PCID' capability by
6941# default,
6942# even if the host CPUs by the same name include it. I.e. 'PCID'
6943# needs
6944# to be explicitly specified when using the said virtual CPU models.
6945#
6946# For now, the ``cpu_model_extra_flags`` config attribute is valid
6947# only in
6948# combination with ``cpu_mode`` + ``cpu_model`` options.
6949#
6950# Besides ``custom``, the libvirt driver has two other CPU modes: The
6951# default, ``host-model``, tells it to do the right thing with respect
6952# to
6953# handling 'PCID' CPU flag for the guest -- *assuming* you are running
6954# updated processor microcode, host and guest kernel, libvirt, and
6955# QEMU.
6956# The other mode, ``host-passthrough``, checks if 'PCID' is available
6957# in
6958# the hardware, and if so directly passes it through to the Nova
6959# guests.
6960# Thus, in context of 'PCID', with either of these CPU modes
6961# (``host-model`` or ``host-passthrough``), there is no need to use
6962# the
6963# ``cpu_model_extra_flags``.
6964#
6965# Related options:
6966#
6967# * cpu_mode
6968# * cpu_model
6969# (list value)
6970#cpu_model_extra_flags =
6971
6972# Location where libvirt driver will store snapshots before uploading
6973# them to image service (string value)
6974#snapshots_directory = $instances_path/snapshots
6975
6976# Location where the Xen hvmloader is kept (string value)
6977#xen_hvmloader_path = /usr/lib/xen/boot/hvmloader
6978
6979#
6980# Specific cache modes to use for different disk types.
6981#
6982# For example: file=directsync,block=none,network=writeback
6983#
6984# For local or direct-attached storage, it is recommended that you use
6985# writethrough (default) mode, as it ensures data integrity and has
6986# acceptable
6987# I/O performance for applications running in the guest, especially
6988# for read
6989# operations. However, caching mode none is recommended for remote NFS
6990# storage,
6991# because direct I/O operations (O_DIRECT) perform better than
6992# synchronous I/O
6993# operations (with O_SYNC). Caching mode none effectively turns all
6994# guest I/O
6995# operations into direct I/O operations on the host, which is the NFS
6996# client in
6997# this environment.
6998#
6999# Possible cache modes:
7000#
7001# * default: Same as writethrough.
7002# * none: With caching mode set to none, the host page cache is
7003# disabled, but
7004# the disk write cache is enabled for the guest. In this mode, the
7005# write
7006# performance in the guest is optimal because write operations
7007# bypass the host
7008# page cache and go directly to the disk write cache. If the disk
7009# write cache
7010# is battery-backed, or if the applications or storage stack in the
7011# guest
7012# transfer data properly (either through fsync operations or file
7013# system
7014# barriers), then data integrity can be ensured. However, because
7015# the host
7016# page cache is disabled, the read performance in the guest would
7017# not be as
7018# good as in the modes where the host page cache is enabled, such as
7019# writethrough mode. Shareable disk devices, like for a multi-
7020# attachable block
7021# storage volume, will have their cache mode set to 'none'
7022# regardless of
7023# configuration.
7024# * writethrough: writethrough mode is the default caching mode. With
7025# caching set to writethrough mode, the host page cache is enabled,
7026# but the
7027# disk write cache is disabled for the guest. Consequently, this
7028# caching mode
7029# ensures data integrity even if the applications and storage stack
7030# in the
7031# guest do not transfer data to permanent storage properly (either
7032# through
7033# fsync operations or file system barriers). Because the host page
7034# cache is
7035# enabled in this mode, the read performance for applications
7036# running in the
7037# guest is generally better. However, the write performance might be
7038# reduced
7039# because the disk write cache is disabled.
7040# * writeback: With caching set to writeback mode, both the host page
7041# cache
7042# and the disk write cache are enabled for the guest. Because of
7043# this, the
7044# I/O performance for applications running in the guest is good, but
7045# the data
7046# is not protected in a power failure. As a result, this caching
7047# mode is
7048# recommended only for temporary data where potential data loss is
7049# not a
7050# concern.
7051# * directsync: Like "writethrough", but it bypasses the host page
7052# cache.
7053# * unsafe: Caching mode of unsafe ignores cache transfer operations
7054# completely. As its name implies, this caching mode should be used
7055# only for
7056# temporary data where data loss is not a concern. This mode can be
7057# useful for
7058# speeding up guest installations, but you should switch to another
7059# caching
7060# mode in production environments.
7061# (list value)
7062#disk_cachemodes =
7063
7064# A path to a device that will be used as source of entropy on the
7065# host. Permitted options are: /dev/random or /dev/hwrng (string
7066# value)
7067#rng_dev_path = <None>
7068
7069# For qemu or KVM guests, set this option to specify a default machine
7070# type per host architecture. You can find a list of supported machine
7071# types in your environment by checking the output of the "virsh
7072# capabilities" command. The format of the value for this config option
7073# is host-arch=machine-type. For example:
7074# x86_64=machinetype1,armv7l=machinetype2 (list value)
7075#hw_machine_type = <None>
7076
7077# The data source used to populate the host "serial" UUID exposed
7078# to the guest in the virtual BIOS. (string value)
7079# Possible values:
7080# none - <No description provided>
7081# os - <No description provided>
7082# hardware - <No description provided>
7083# auto - <No description provided>
7084#sysinfo_serial = auto
7085
7086# Period, in seconds, for collecting memory usage statistics. A zero or
7087# negative value disables memory usage statistics. (integer
7088# value)
7089#mem_stats_period_seconds = 10
7090
7091# List of uid targets and ranges. Syntax is guest-uid:host-uid:count.
7092# A maximum of 5 entries is allowed. (list value)
7093#uid_maps =
7094
7095# List of gid targets and ranges. Syntax is guest-gid:host-gid:count.
7096# A maximum of 5 entries is allowed. (list value)
7097#gid_maps =
7098
7099# In a realtime host context, vCPUs for the guest will run at this
7100# scheduling priority. The priority range depends on the host kernel (usually
7101# 1-99) (integer value)
7102#realtime_scheduler_priority = 1
7103
7104#
7105# This is a list of performance events that can be monitored. These events
7106# will be passed to the libvirt domain XML when creating new instances.
7108# Then event statistics data can be collected from libvirt. The
7109# minimum
7110# libvirt version is 2.0.0. For more information about `Performance
7111# monitoring
7112# events`, refer to https://libvirt.org/formatdomain.html#elementsPerf .
7113#
7114# Possible values:
7115# * A string list. For example: ``enabled_perf_events = cmt, mbml,
7116# mbmt``
7117# The supported events list can be found in
7118# https://libvirt.org/html/libvirt-libvirt-domain.html ,
7119# where you may need to search for the keywords ``VIR_PERF_PARAM_*``
7120# (list value)
7121#enabled_perf_events =
7122
7123#
7124# VM Images format.
7125#
7126# If default is specified, then use_cow_images flag is used instead of
7127# this
7128# one.
7129#
7130# Related options:
7131#
7132# * virt.use_cow_images
7133# * images_volume_group
7134# (string value)
7135# Possible values:
7136# raw - <No description provided>
7137# flat - <No description provided>
7138# qcow2 - <No description provided>
7139# lvm - <No description provided>
7140# rbd - <No description provided>
7141# ploop - <No description provided>
7142# default - <No description provided>
7143#images_type = default
7144
7145#
7146# LVM Volume Group that is used for VM images, when you specify
7147# images_type=lvm
7148#
7149# Related options:
7150#
7151# * images_type
7152# (string value)
7153#images_volume_group = <None>
7154
7155#
7156# Create sparse logical volumes (with virtualsize) if this flag is set
7157# to True.
7158# (boolean value)
7159#sparse_logical_volumes = false
7160
7161# The RADOS pool in which rbd volumes are stored (string value)
7162#images_rbd_pool = rbd
7163
7164# Path to the ceph configuration file to use (string value)
7165#images_rbd_ceph_conf =
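# For example, a Ceph-backed ephemeral storage setup (hypothetical values)
# would typically combine these options with the rbd_user and
# rbd_secret_uuid options further below:
#
# images_type = rbd
# images_rbd_pool = vms
# images_rbd_ceph_conf = /etc/ceph/ceph.conf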
7166
7167#
7168# Discard option for nova managed disks.
7169#
7170# Requires:
7171#
7172# * Libvirt >= 1.0.6
7173# * Qemu >= 1.5 (raw format)
7174# * Qemu >= 1.6 (qcow2 format)
7175# (string value)
7176# Possible values:
7177# ignore - <No description provided>
7178# unmap - <No description provided>
7179#hw_disk_discard = <None>
7180{%- if compute.libvirt.hw_disk_discard is defined %}
7181hw_disk_discard={{ compute.libvirt.hw_disk_discard }}
7182{%- endif %}
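# A minimal pillar sketch (hypothetical value, assuming this formula's
# nova:compute pillar layout) that renders hw_disk_discard:
#
#   nova:
#     compute:
#       libvirt:
#         hw_disk_discard: unmap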
7183
7184# DEPRECATED: Allows image information files to be stored in non-
7185# standard locations (string value)
7186# This option is deprecated for removal since 14.0.0.
7187# Its value may be silently ignored in the future.
7188# Reason: Image info files are no longer used by the image cache
7189#image_info_filename_pattern = $instances_path/$image_cache_subdirectory_name/%(image)s.info
7190
7191# Unused resized base images younger than this will not be removed
7192# (integer value)
7193#remove_unused_resized_minimum_age_seconds = 3600
7194
7195# DEPRECATED: Write a checksum for files in _base to disk (boolean
7196# value)
7197# This option is deprecated for removal since 14.0.0.
7198# Its value may be silently ignored in the future.
7199# Reason: The image cache no longer periodically calculates checksums
7200# of stored images. Data integrity can be checked at the block or
7201# filesystem level.
7202#checksum_base_images = false
7203
7204# DEPRECATED: How frequently to checksum base images (integer value)
7205# This option is deprecated for removal since 14.0.0.
7206# Its value may be silently ignored in the future.
7207# Reason: The image cache no longer periodically calculates checksums
7208# of stored images. Data integrity can be checked at the block or
7209# filesystem level.
7210#checksum_interval_seconds = 3600
7211
7212#
7213# Method used to wipe ephemeral disks when they are deleted. Only
7214# takes effect
7215# if LVM is set as backing storage.
7216#
7217# Possible values:
7218#
7219# * none - do not wipe deleted volumes
7220# * zero - overwrite volumes with zeroes
7221# * shred - overwrite volume repeatedly
7222#
7223# Related options:
7224#
7225# * images_type - must be set to ``lvm``
7226# * volume_clear_size
7227# (string value)
7228# Possible values:
7229# none - <No description provided>
7230# zero - <No description provided>
7231# shred - <No description provided>
7232#volume_clear = zero
7233
7234#
7235# Size of area in MiB, counting from the beginning of the allocated
7236# volume,
7237# that will be cleared using method set in ``volume_clear`` option.
7238#
7239# Possible values:
7240#
7241# * 0 - clear whole volume
7242# * >0 - clear specified amount of MiB
7243#
7244# Related options:
7245#
7246# * images_type - must be set to ``lvm``
7247# * volume_clear - must be set and the value must be different than
7248# ``none``
7249# for this option to have any impact
7250# (integer value)
7251# Minimum value: 0
7252#volume_clear_size = 0
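# For example (hypothetical values), to shred only the first 100 MiB of
# each deleted LVM-backed ephemeral disk:
#
# images_type = lvm
# volume_clear = shred
# volume_clear_size = 100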
7253
7254#
7255# Enable snapshot compression for ``qcow2`` images.
7256#
7257# Note: you can set ``snapshot_image_format`` to ``qcow2`` to force
7258# all
7259# snapshots to be in ``qcow2`` format, independently from their
7260# original image
7261# type.
7262#
7263# Related options:
7264#
7265# * snapshot_image_format
7266# (boolean value)
7267#snapshot_compression = false
7268
7269# Use virtio for bridge interfaces with KVM/QEMU (boolean value)
7270#use_virtio_for_bridges = true
7271
7272#
7273# Use multipath connection of the iSCSI or FC volume
7274#
7275# Volumes can be connected in LibVirt as multipath devices. This
7276# will
7277# provide high availability and fault tolerance.
7278# (boolean value)
7279# Deprecated group/name - [libvirt]/iscsi_use_multipath
7280#volume_use_multipath = false
Oleh Hryhorov08482aa2018-11-19 14:07:47 +02007281{%- if compute.libvirt.volume_use_multipath is defined %}
7282volume_use_multipath={{ compute.libvirt.volume_use_multipath }}
7283{%- endif %}
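# A minimal pillar sketch (assuming this formula's nova:compute pillar
# layout) that renders volume_use_multipath:
#
#   nova:
#     compute:
#       libvirt:
#         volume_use_multipath: true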
Oleh Hryhorov5d0f13a2018-05-21 17:02:54 +00007284
7285#
7286# Number of times to scan given storage protocol to find volume.
7287# (integer value)
7288# Deprecated group/name - [libvirt]/num_iscsi_scan_tries
7289#num_volume_scan_tries = 5
7290
7291#
7292# Number of times to rediscover AoE target to find volume.
7293#
7294# Nova provides support for block storage attaching to hosts via AOE
7295# (ATA over
7296# Ethernet). This option allows the user to specify the maximum number
7297# of retry
7298# attempts that can be made to discover the AoE device.
7299# (integer value)
7300#num_aoe_discover_tries = 3
7301
7302#
7303# The iSCSI transport iface to use to connect to target in case
7304# offload support
7305# is desired.
7306#
7307# Default format is of the form <transport_name>.<hwaddress> where
7308# <transport_name> is one of (be2iscsi, bnx2i, cxgb3i, cxgb4i,
7309# qla4xxx, ocs) and
7310# <hwaddress> is the MAC address of the interface and can be generated
7311# via the
7312# iscsiadm -m iface command. Do not confuse the iscsi_iface parameter
7313# to be
7314# provided here with the actual transport name.
7315# (string value)
7316# Deprecated group/name - [libvirt]/iscsi_transport
7317#iscsi_iface = <None>
7318
7319#
7320# Number of times to scan iSER target to find volume.
7321#
7322# iSER is a server network protocol that extends iSCSI protocol to use
7323# Remote
7324# Direct Memory Access (RDMA). This option allows the user to specify
7325# the maximum
7326# number of scan attempts that can be made to find iSER volume.
7327# (integer value)
7328#num_iser_scan_tries = 5
7329
7330#
7331# Use multipath connection of the iSER volume.
7332#
7333# iSER volumes can be connected as multipath devices. This will
7334# provide high
7335# availability and fault tolerance.
7336# (boolean value)
7337#iser_use_multipath = false
7338
7339#
7340# The RADOS client name for accessing rbd (RADOS Block Devices)
7341# volumes.
7342#
7343# Libvirt will refer to this user when connecting and authenticating
7344# with
7345# the Ceph RBD server.
7346# (string value)
7347#rbd_user = <None>
7348
7349#
7350# The libvirt UUID of the secret for the rbd_user volumes.
7351# (string value)
7352#rbd_secret_uuid = <None>
7353
7354#
7355# Directory where the NFS volume is mounted on the compute node.
7356# The default is 'mnt' directory of the location where nova's Python
7357# module
7358# is installed.
7359#
7360# NFS provides shared storage for the OpenStack Block Storage service.
7361#
7362# Possible values:
7363#
7364# * A string representing absolute path of mount point.
7365# (string value)
7366#nfs_mount_point_base = $state_path/mnt
7367
7368#
7369# Mount options passed to the NFS client. See the nfs man
7370# page
7371# for details.
7372#
7373# Mount options control the way the filesystem is mounted and how the
7374# NFS client behaves when accessing files on this mount point.
7375#
7376# Possible values:
7377#
7378# * Any string representing mount options separated by commas.
7379# * Example string: vers=3,lookupcache=pos
7380# (string value)
Martin Polreichb8f389f2018-08-29 10:48:45 +02007381{%- if compute.nfs_mount_options is defined %}
7382nfs_mount_options="{{ compute.nfs_mount_options }}"
7383{%- endif %}
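# A minimal pillar sketch (hypothetical value taken from the example
# string above, assuming this formula's nova:compute pillar layout):
#
#   nova:
#     compute:
#       nfs_mount_options: vers=3,lookupcache=pos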
Oleh Hryhorov5d0f13a2018-05-21 17:02:54 +00007384
7385#
7386# Directory where the Quobyte volume is mounted on the compute node.
7387#
7388# Nova supports the Quobyte volume driver that enables storing Block
7389# Storage
7390# service volumes on a Quobyte storage back end. This option specifies
7391# the
7392# path of the directory where Quobyte volume is mounted.
7393#
7394# Possible values:
7395#
7396# * A string representing absolute path of mount point.
7397# (string value)
7398#quobyte_mount_point_base = $state_path/mnt
7399
7400# Path to a Quobyte Client configuration file. (string value)
7401#quobyte_client_cfg = <None>
7402
7403#
7404# Directory where the SMBFS shares are mounted on the compute node.
7405# (string value)
7406#smbfs_mount_point_base = $state_path/mnt
7407
7408#
7409# Mount options passed to the SMBFS client.
7410#
7411# Provide SMBFS options as a single string containing all parameters.
7412# See mount.cifs man page for details. Note that the libvirt-qemu
7413# ``uid``
7414# and ``gid`` must be specified.
7415# (string value)
7416#smbfs_mount_options =
7417
7418#
7419# libvirt's transport method for remote file operations.
7420#
7421# Because libvirt cannot use RPC to copy files over the network to/from
7422# other
7423# compute nodes, another method must be used for:
7424#
7425# * creating directory on remote host
7426# * creating file on remote host
7427# * removing file from remote host
7428# * copying file to remote host
7429# (string value)
7430# Possible values:
7431# ssh - <No description provided>
7432# rsync - <No description provided>
7433#remote_filesystem_transport = ssh
7434
7435#
7436# Directory where the Virtuozzo Storage clusters are mounted on the
7437# compute
7438# node.
7439#
7440# This option defines a non-standard mountpoint for the Vzstorage cluster.
7441#
7442# Related options:
7443#
7444# * vzstorage_mount_* group of parameters
7445# (string value)
7446#vzstorage_mount_point_base = $state_path/mnt
7447
7448#
7449# Mount owner user name.
7450#
7451# This option defines the owner user of Vzstorage cluster mountpoint.
7452#
7453# Related options:
7454#
7455# * vzstorage_mount_* group of parameters
7456# (string value)
7457#vzstorage_mount_user = stack
7458
7459#
7460# Mount owner group name.
7461#
7462# This option defines the owner group of Vzstorage cluster mountpoint.
7463#
7464# Related options:
7465#
7466# * vzstorage_mount_* group of parameters
7467# (string value)
7468#vzstorage_mount_group = qemu
7469
7470#
7471# Mount access mode.
7472#
7473# This option defines the access bits of Vzstorage cluster mountpoint,
7474# in the format similar to one of chmod(1) utility, like this: 0770.
7475# It consists of one to four digits ranging from 0 to 7, with missing
7476# leading digits assumed to be 0's.
7477#
7478# Related options:
7479#
7480# * vzstorage_mount_* group of parameters
7481# (string value)
7482#vzstorage_mount_perms = 0770
7483
7484#
7485# Path to vzstorage client log.
7486#
7487# This option defines the log file for cluster operations;
7488# it should include the "%(cluster_name)s" template to separate
7489# logs from multiple shares.
7490#
7491# Related options:
7492#
7493# * vzstorage_mount_opts may include more detailed logging options.
7494# (string value)
7495#vzstorage_log_path = /var/log/vstorage/%(cluster_name)s/nova.log.gz
7496
7497#
7498# Path to the SSD cache file.
7499#
7500# You can attach an SSD drive to a client and configure the drive to
7501# store
7502# a local cache of frequently accessed data. By having a local cache
7503# on a
7504# client's SSD drive, you can increase the overall cluster performance
7505# by
7506# a factor of 10 or more.
7507# WARNING! There are many SSD models which are not server grade and
7508# may lose an arbitrary set of data changes on power loss.
7509# Such SSDs should not be used in Vstorage and are dangerous, as they may
7510# lead
7511# to data corruption and inconsistencies. Please consult the
7512# manual
7513# on which SSD models are known to be safe or verify it using
7514# vstorage-hwflush-check(1) utility.
7515#
7516# This option defines the path which should include "%(cluster_name)s"
7517# template to separate caches from multiple shares.
7518#
7519# Related options:
7520#
7521# * vzstorage_mount_opts may include more detailed cache options.
7522# (string value)
7523#vzstorage_cache_path = <None>
7524
7525#
7526# Extra mount options for pstorage-mount
7527#
7528# For full description of them, see
7529# https://static.openvz.org/vz-man/man1/pstorage-mount.1.gz.html
7530# Format is a python string representation of arguments list, like:
7531# "['-v', '-R', '500']"
7532# Shouldn't include -c, -l, -C, -u, -g and -m as those have
7533# explicit vzstorage_* options.
7534#
7535# Related options:
7536#
7537# * All other vzstorage_* options
7538# (list value)
7539#vzstorage_mount_opts =
7540
7541
7542[metrics]
7543#
7544# Configuration options for metrics
7545#
7546# Options under this group allow you to adjust how values assigned to
7547# metrics are
7548# calculated.
7549
7550#
7551# From nova.conf
7552#
7553
7554#
7555# When using metrics to weight the suitability of a host, you can use
7556# this option
7557# to change how the calculated weight influences the weight assigned
7558# to a host as
7559# follows:
7560#
7561# * >1.0: increases the effect of the metric on overall weight
7562# * 1.0: no change to the calculated weight
7563# * >0.0,<1.0: reduces the effect of the metric on overall weight
7564# * 0.0: the metric value is ignored, and the value of the
7565# 'weight_of_unavailable' option is returned instead
7566# * >-1.0,<0.0: the effect is reduced and reversed
7567# * -1.0: the effect is reversed
7568# * <-1.0: the effect is increased proportionally and reversed
7569#
7570# This option is only used by the FilterScheduler and its subclasses;
7571# if you use
7572# a different scheduler, this option has no effect.
7573#
7574# Possible values:
7575#
7576# * An integer or float value, where the value corresponds to the
7577# multiplier
7578# ratio for this weigher.
7579#
7580# Related options:
7581#
7582# * weight_of_unavailable
7583# (floating point value)
7584#weight_multiplier = 1.0
7585
7586#
7587# This setting specifies the metrics to be weighed and the relative
7588# ratios for
7589# each metric. This should be a single string value, consisting of a
7590# series of
7591# one or more 'name=ratio' pairs, separated by commas, where 'name' is
7592# the name
7593# of the metric to be weighed, and 'ratio' is the relative weight for
7594# that
7595# metric.
7596#
7597# Note that if the ratio is set to 0, the metric value is ignored, and
7598# instead
7599# the weight will be set to the value of the 'weight_of_unavailable'
7600# option.
7601#
7602# As an example, let's consider the case where this option is set to:
7603#
7604# ``name1=1.0, name2=-1.3``
7605#
7606# The final weight will be:
7607#
7608# ``(name1.value * 1.0) + (name2.value * -1.3)``
7609#
7610# This option is only used by the FilterScheduler and its subclasses;
7611# if you use
7612# a different scheduler, this option has no effect.
7613#
7614# Possible values:
7615#
7616# * A list of zero or more key/value pairs separated by commas, where
7617# the key is
7618# a string representing the name of a metric and the value is a
7619# numeric weight
7620# for that metric. If any value is set to 0, the value is ignored
7621# and the
7622# weight will be set to the value of the 'weight_of_unavailable'
7623# option.
7624#
7625# Related options:
7626#
7627# * weight_of_unavailable
7628# (list value)
7629#weight_setting =
7630
7631#
7632# This setting determines how any unavailable metrics are treated. If
7633# this option
7634# is set to True, any hosts for which a metric is unavailable will
7635# raise an
7636# exception, so it is recommended to also use the MetricFilter to
7637# filter out
7638# those hosts before weighing.
7639#
7640# This option is only used by the FilterScheduler and its subclasses;
7641# if you use
7642# a different scheduler, this option has no effect.
7643#
7644# Possible values:
7645#
7646# * True or False, where False ensures any metric being unavailable
7647# for a host
7648# will set the host weight to 'weight_of_unavailable'.
7649#
7650# Related options:
7651#
7652# * weight_of_unavailable
7653# (boolean value)
7654#required = true
7655
7656#
7657# When any of the following conditions are met, this value will be
7658# used in place
7659# of any actual metric value:
7660#
7661# * One of the metrics named in 'weight_setting' is not available for
7662# a host,
7663# and the value of 'required' is False
7664# * The ratio specified for a metric in 'weight_setting' is 0
7665# * The 'weight_multiplier' option is set to 0
7666#
7667# This option is only used by the FilterScheduler and its subclasses;
7668# if you use
7669# a different scheduler, this option has no effect.
7670#
7671# Possible values:
7672#
7673# * An integer or float value, where the value corresponds to the
7674# multiplier
7675# ratio for this weigher.
7676#
7677# Related options:
7678#
7679# * weight_setting
7680# * required
7681# * weight_multiplier
7682# (floating point value)
7683#weight_of_unavailable = -10000.0
7684
7685
7686[mks]
7687#
7688# The Nova compute node uses WebMKS, a desktop sharing protocol, to provide
7689# instance console access to VMs created by VMware hypervisors.
7690#
7691# Related options:
7692# The following options must be set to provide console access.
7693# * mksproxy_base_url
7694# * enabled
7695
7696#
7697# From nova.conf
7698#
7699
7700#
7701# Location of MKS web console proxy
7702#
7703# The URL in the response points to a WebMKS proxy which
7704# starts proxying between the client and the corresponding vCenter
7705# server where the instance runs. In order to use web-based
7706# console access, the WebMKS proxy should be installed and configured.
7707#
7708# Possible values:
7709#
7710# * Must be a valid URL of the form:``http://host:port/`` or
7711# ``https://host:port/``
7712# (uri value)
7713#mksproxy_base_url = http://127.0.0.1:6090/
7714
7715#
7716# Enables graphical console access for virtual machines.
7717# (boolean value)
7718#enabled = false
7719
7720
7721[neutron]
7722#
7723# Configuration options for neutron (network connectivity as a
7724# service).
7725
7726#
7727# From nova.conf
7728#
7729
7730# DEPRECATED:
7731# This option specifies the URL for connecting to Neutron.
7732#
7733# Possible values:
7734#
7735# * Any valid URL that points to the Neutron API service is
7736# appropriate here.
7737# This typically matches the URL returned for the 'network' service
7738# type
7739# from the Keystone service catalog.
7740# (uri value)
7741# This option is deprecated for removal since 17.0.0.
7742# Its value may be silently ignored in the future.
7743# Reason: Endpoint lookup uses the service catalog via common
7744# keystoneauth1 Adapter configuration options. In the current release,
7745# "url" will override this behavior, but will be ignored and/or
7746# removed in a future release. To achieve the same result, use the
7747# endpoint_override option instead.
7748#url = http://127.0.0.1:9696
7749
7750#
7751# Default name for the Open vSwitch integration bridge.
7752#
7753# Specifies the name of an integration bridge interface used by
7754# OpenvSwitch.
7755# This option is only used if Neutron does not specify the OVS bridge
7756# name in
7757# port binding responses.
7758# (string value)
7759#ovs_bridge = br-int
7760
7761#
7762# Default name for the floating IP pool.
7763#
7764# Specifies the name of floating IP pool used for allocating floating
7765# IPs. This
7766# option is only used if Neutron does not specify the floating IP pool
7767# name in
7768# port binding responses.
7769# (string value)
7770#default_floating_pool = nova
7771
7772#
7773# Integer value representing the number of seconds to wait before
7774# querying
7775# Neutron for extensions. After this number of seconds the next time
7776# Nova
7777# needs to create a resource in Neutron it will requery Neutron for
7778# the
7779# extensions that it has loaded. Setting the value to 0 will refresh the
7780# extensions with no wait.
7781# (integer value)
7782# Minimum value: 0
7783#extension_sync_interval = 600
7784extension_sync_interval={{ compute.network.get('extension_sync_interval', '600') }}
7785
7786#
7787# When set to True, this option indicates that Neutron will be used to
7788# proxy
7789# metadata requests and resolve instance ids. Otherwise, the instance
7790# ID must be
7791# passed to the metadata request in the 'X-Instance-ID' header.
7792#
7793# Related options:
7794#
7795# * metadata_proxy_shared_secret
7796# (boolean value)
7797#service_metadata_proxy = false
7798
7799#
7800# This option holds the shared secret string used to validate metadata
7801# requests
7802# proxied by Neutron. In order to be used, the
7803# 'X-Metadata-Provider-Signature' header must be supplied in the
7804# request.
7805#
7806# Related options:
7807#
7808# * service_metadata_proxy
7809# (string value)
7810#metadata_proxy_shared_secret =
7811
7812# PEM encoded Certificate Authority to use when verifying HTTPS
7813# connections. (string value)
7814#cafile = <None>
7815{%- if compute.network.get('protocol', 'http') == 'https' %}
7816cafile={{ compute.network.get('cacert_file', compute.cacert_file) }}
7817{%- endif %}
7818
7819# PEM encoded client certificate cert file (string value)
7820#certfile = <None>
7821
7822# PEM encoded client certificate key file (string value)
7823#keyfile = <None>
7824
7825# Verify HTTPS connections. (boolean value)
7826#insecure = false
7827
7828# Timeout value for http requests (integer value)
7829#timeout = <None>
7830timeout=300
7831
7832# Authentication type to load (string value)
7833# Deprecated group/name - [neutron]/auth_plugin
7834#auth_type = <None>
7835auth_type = v3password
7836
7837# Config Section from which to load plugin specific options (string
7838# value)
7839#auth_section = <None>
7840
7841# Authentication URL (string value)
7842#auth_url = <None>
7843auth_url = {{ compute.identity.get('protocol', 'http') }}://{{ compute.identity.host }}:{{ compute.identity.port }}/v3
7844
7845# Scope for system operations (string value)
7846#system_scope = <None>
7847
7848# Domain ID to scope to (string value)
7849#domain_id = <None>
7850
7851# Domain name to scope to (string value)
7852#domain_name = <None>
7853
7854# Project ID to scope to (string value)
7855#project_id = <None>
7856
7857# Project name to scope to (string value)
7858#project_name = <None>
7859project_name={{ compute.identity.tenant }}
7860
7861# Domain ID containing project (string value)
7862#project_domain_id = <None>
7863
7864# Domain name containing project (string value)
7865#project_domain_name = <None>
7866project_domain_name = {{ compute.get('project_domain_name', 'Default') }}
7867
7868# Trust ID (string value)
7869#trust_id = <None>
7870
7871# Optional domain ID to use with v3 and v2 parameters. It will be used
7872# for both the user and project domain in v3 and ignored in v2
7873# authentication. (string value)
7874#default_domain_id = <None>
7875
7876# Optional domain name to use with v3 API and v2 parameters. It will
7877# be used for both the user and project domain in v3 and ignored in v2
7878# authentication. (string value)
7879#default_domain_name = <None>
7880
7881# User ID (string value)
7882#user_id = <None>
7883
7884# Username (string value)
7885# Deprecated group/name - [neutron]/user_name
7886#username = <None>
7887username={{ compute.network.user }}
7888
7889# User's domain id (string value)
7890#user_domain_id = <None>
7891
7892# User's domain name (string value)
7893#user_domain_name = <None>
7894user_domain_name = {{ compute.get('user_domain_name', 'Default') }}
7895
7896# User's password (string value)
7897#password = <None>
7898password={{ compute.network.password }}
7899
7900# Tenant ID (string value)
7901#tenant_id = <None>
7902
7903# Tenant Name (string value)
7904#tenant_name = <None>
7905
7906# The default service_type for endpoint URL discovery. (string value)
7907#service_type = network
7908
7909# The default service_name for endpoint URL discovery. (string value)
7910#service_name = <None>
7911
7912# List of interfaces, in order of preference, for endpoint URL. (list
7913# value)
7914#valid_interfaces = internal,public
7915
7916# The default region_name for endpoint URL discovery. (string value)
7917#region_name = <None>
7918region_name= {{ compute.network.region }}
7919
7920# Always use this endpoint URL for requests for this client. NOTE: The
7921# unversioned endpoint should be specified here; to request a
7922# particular API version, use the `version`, `min-version`, and/or
7923# `max-version` options. (string value)
7924#endpoint_override = <None>
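# A minimal pillar sketch (hypothetical values) of the nova:compute
# identity and network keys consumed by the settings rendered above:
#
#   nova:
#     compute:
#       identity:
#         protocol: http
#         host: 10.0.0.10
#         port: 5000
#         tenant: service
#       network:
#         user: neutron
#         password: secret
#         region: RegionOne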
7925
7926
7927[notifications]
7928#
7929# Most of the actions in Nova which manipulate the system state
7930# generate
7931# notifications which are posted to the messaging component (e.g.
7932# RabbitMQ) and
7933# can be consumed by any service outside of OpenStack. More technical
7934# details
7935# at
7936# https://docs.openstack.org/nova/latest/reference/notifications.html
7937
7938#
7939# From nova.conf
7940#
7941
7942#
7943# If set, send compute.instance.update notifications on
7944# instance state changes.
7945#
7946# Please refer to
7947# https://docs.openstack.org/nova/latest/reference/notifications.html
7948# for
7949# additional information on notifications.
7950#
7951# Possible values:
7952#
7953# * None - no notifications
7954# * "vm_state" - notifications are sent with VM state transition
7955# information in
7956# the ``old_state`` and ``state`` fields. The ``old_task_state`` and
7957# ``new_task_state`` fields will be set to the current task_state of
7958# the
7959# instance.
7960# * "vm_and_task_state" - notifications are sent with VM and task
7961# state
7962# transition information.
7963# (string value)
7964# Possible values:
7965# <None> - <No description provided>
7966# vm_state - <No description provided>
7967# vm_and_task_state - <No description provided>
7968#notify_on_state_change = <None>
7969{%- if compute.get('notification', {}).notify_on is defined %}
7970{%- for key, value in compute.notification.notify_on.iteritems() %}
7971notify_on_{{ key }} = {{ value }}
7972{%- endfor %}
7973{%- elif pillar.ceilometer is defined %}
7974notify_on_state_change = vm_and_task_state
7975{%- endif %}
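# A minimal pillar sketch (hypothetical value, assuming this formula's
# nova:compute pillar layout) that renders notify_on_state_change via the
# loop above:
#
#   nova:
#     compute:
#       notification:
#         notify_on:
#           state_change: vm_and_task_state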
7976
7977# Default notification level for outgoing notifications. (string
7978# value)
7979# Possible values:
7980# DEBUG - <No description provided>
7981# INFO - <No description provided>
7982# WARN - <No description provided>
7983# ERROR - <No description provided>
7984# CRITICAL - <No description provided>
7985# Deprecated group/name - [DEFAULT]/default_notification_level
7986#default_level = INFO
7987
7988# DEPRECATED:
7989# Default publisher_id for outgoing notifications. If you consider
7990# routing
7991# notifications using a different publisher, change this value
7992# accordingly.
7993#
7994# Possible values:
7995#
7996# * Defaults to the current hostname of this host, but it can be any
7997# valid
7998# oslo.messaging publisher_id
7999#
8000# Related options:
8001#
8002# * host - Hostname, FQDN or IP address of this host.
8003# (string value)
8004# This option is deprecated for removal since 17.0.0.
8005# Its value may be silently ignored in the future.
8006# Reason:
8007# This option is only used when ``monkey_patch=True`` and
8008# ``monkey_patch_modules`` is configured to specify the legacy
8009# notify_decorator.
8010# Since the monkey_patch and monkey_patch_modules options are
8011# deprecated, this
8012# option is also deprecated.
8013#default_publisher_id = $host
8014
8015#
8016# Specifies which notification format shall be used by nova.
8017#
8018# The default value is fine for most deployments and rarely needs to
8019# be changed.
8020# This value can be set to 'versioned' once the infrastructure moves
8021# closer to
8022# consuming the newer format of notifications. After this occurs, this
8023# option
8024# will be removed.
8025#
8026# Note that notifications can be completely disabled by setting
8027# ``driver=noop``
8028# in the ``[oslo_messaging_notifications]`` group.
8029#
8030# Possible values:
8031# * unversioned: Only the legacy unversioned notifications are
8032# emitted.
8033# * versioned: Only the new versioned notifications are emitted.
8034# * both: Both the legacy unversioned and the new versioned
8035# notifications are
8036# emitted. (Default)
8037#
8038# The list of versioned notifications is visible in
8039# https://docs.openstack.org/nova/latest/reference/notifications.html
8040# (string value)
8041# Possible values:
8042# unversioned - <No description provided>
8043# versioned - <No description provided>
8044# both - <No description provided>
8045#notification_format = both
8046
8047#
8048# Specifies the topics for the versioned notifications issued by nova.
8049#
8050# The default value is fine for most deployments and rarely needs to
8051# be changed.
8052# However, if you have a third-party service that consumes versioned
8053# notifications, it might be worth getting a topic for that service.
8054# Nova will send a message containing a versioned notification payload
8055# to each
8056# topic queue in this list.
8057#
8058# The list of versioned notifications is visible in
8059# https://docs.openstack.org/nova/latest/reference/notifications.html
8060# (list value)
8061#versioned_notifications_topics = versioned_notifications
8062
8063#
8064# If enabled, include block device information in the versioned
8065# notification
8066# payload. Sending block device information is disabled by default as
8067# providing
8068# that information can incur some overhead on the system since the
8069# information
8070# may need to be loaded from the database.
8071# (boolean value)
8072#bdms_in_notifications = false
8073
8074
8075[osapi_v21]
8076
8077#
8078# From nova.conf
8079#
8080
8081# DEPRECATED:
8082# This option is a string representing a regular expression (regex)
8083# that matches
8084# the project_id as contained in URLs. If not set, it will match
8085# normal UUIDs
8086# created by keystone.
8087#
8088# Possible values:
8089#
8090# * A string representing any legal regular expression
8091# (string value)
8092# This option is deprecated for removal since 13.0.0.
8093# Its value may be silently ignored in the future.
8094# Reason:
8095# Recent versions of nova constrain project IDs to hexadecimal
8096# characters and
8097# dashes. If your installation uses IDs outside of this range, you
8098# should use
8099# this option to provide your own regex and give you time to migrate
8100# offending
8101# projects to valid IDs before the next release.
8102#project_id_regex = <None>
8103
8104
8105[pci]
8106
8107#
8108# From nova.conf
8109#
8110
8111#
8112# An alias for a PCI passthrough device requirement.
8113#
8114# This allows users to specify the alias in the extra specs for a
8115# flavor, without
8116# needing to repeat all the PCI property requirements.
8117#
8118# Possible Values:
8119#
8120# * A list of JSON values which describe the aliases. For example::
8121#
8122# alias = {
8123# "name": "QuickAssist",
8124# "product_id": "0443",
8125# "vendor_id": "8086",
8126# "device_type": "type-PCI",
8127# "numa_policy": "required"
8128# }
8129#
8130# This defines an alias for the Intel QuickAssist card. (multi
8131# valued). Valid
8132# key values are :
8133#
8134# ``name``
8135# Name of the PCI alias.
8136#
8137# ``product_id``
8138# Product ID of the device in hexadecimal.
8139#
8140# ``vendor_id``
8141# Vendor ID of the device in hexadecimal.
8142#
8143# ``device_type``
8144# Type of PCI device. Valid values are: ``type-PCI``, ``type-PF``
8145# and
8146# ``type-VF``.
8147#
8148# ``numa_policy``
8149# Required NUMA affinity of device. Valid values are: ``legacy``,
8150# ``preferred`` and ``required``.
8151# (multi valued)
8152# Deprecated group/name - [DEFAULT]/pci_alias
8153#alias =
Oleh Hryhorovf5093b82018-10-17 11:16:08 +00008154{%- if compute.get('pci', {}).get('alias', false) %}
8155 {%- for alias_name, alias in compute.pci.alias.iteritems() %}
8156alias = {{ alias | json }}
8157 {%- endfor %}
8158{%- endif %}
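# A minimal pillar sketch (hypothetical values based on the QuickAssist
# example above, assuming this formula's nova:compute pillar layout); each
# alias entry is rendered as a JSON alias line by the loop above:
#
#   nova:
#     compute:
#       pci:
#         alias:
#           quickassist:
#             name: QuickAssist
#             product_id: "0443"
#             vendor_id: "8086"
#             device_type: type-PCI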
Oleh Hryhorov5d0f13a2018-05-21 17:02:54 +00008159
8160#
8161# White list of PCI devices available to VMs.
8162#
8163# Possible values:
8164#
8165# * A JSON dictionary which describes a whitelisted PCI device. It
8166# should take
8167# the following format:
8168#
8169# ["vendor_id": "<id>",] ["product_id": "<id>",]
8170# ["address": "[[[[<domain>]:]<bus>]:][<slot>][.[<function>]]" |
8171# "devname": "<name>",]
8172# {"<tag>": "<tag_value>",}
8173#
8174# Where '[' indicates zero or one occurrences, '{' indicates zero or
8175# multiple
8176# occurrences, and '|' indicates mutually exclusive options. Note that any
8177# missing
8178# fields are automatically wildcarded.
8179#
8180# Valid key values are :
8181#
8182# * "vendor_id": Vendor ID of the device in hexadecimal.
8183# * "product_id": Product ID of the device in hexadecimal.
8184# * "address": PCI address of the device.
8185# * "devname": Device name of the device (e.g. an interface name).
8186# Not all
8187# PCI devices have a name.
8188# * "<tag>": Additional <tag> and <tag_value> used for matching PCI
8189# devices.
8190# Supported <tag>: "physical_network".
8191#
8192# The address key supports traditional glob style and regular
8193# expression
8194# syntax. Valid examples are:
8195#
8196# passthrough_whitelist = {"devname":"eth0",
8197# "physical_network":"physnet"}
8198# passthrough_whitelist = {"address":"*:0a:00.*"}
8199# passthrough_whitelist = {"address":":0a:00.",
8200# "physical_network":"physnet1"}
8201# passthrough_whitelist = {"vendor_id":"1137",
8202# "product_id":"0071"}
8203# passthrough_whitelist = {"vendor_id":"1137",
8204# "product_id":"0071",
8205# "address": "0000:0a:00.1",
8206# "physical_network":"physnet1"}
8207# passthrough_whitelist = {"address":{"domain": ".*",
8208# "bus": "02", "slot": "01",
8209# "function": "[2-7]"},
8210# "physical_network":"physnet1"}
8211# passthrough_whitelist = {"address":{"domain": ".*",
8212# "bus": "02", "slot":
8213# "0[1-2]",
8214# "function": ".*"},
8215# "physical_network":"physnet1"}
8216#
8217# The following are invalid, as they specify mutually exclusive
8218# options:
8219#
8220# passthrough_whitelist = {"devname":"eth0",
8221# "physical_network":"physnet",
8222# "address":"*:0a:00.*"}
8223#
8224# * A JSON list of JSON dictionaries corresponding to the above
8225# format. For
8226# example:
8227#
8228# passthrough_whitelist = [{"product_id":"0001",
8229# "vendor_id":"8086"},
8230# {"product_id":"0002",
8231# "vendor_id":"8086"}]
8232# (multi valued)
8233# Deprecated group/name - [DEFAULT]/pci_passthrough_whitelist
8234#passthrough_whitelist =
8235{%- if compute.get('sriov', false) %}
Oleh Hryhorovf5093b82018-10-17 11:16:08 +00008236 {%- for nic_name, sriov in compute.sriov.iteritems() %}
8237passthrough_whitelist = {{ sriov | json }}
8238 {%- endfor %}
Oleh Hryhorov5d0f13a2018-05-21 17:02:54 +00008239{%- endif %}
8240
Oleksandr Pidrepnyi14f08272019-02-20 12:48:17 +02008241{%- if compute.get('pci', {}).get('passthrough_whitelist', false) %}
8242 {%- for item in compute.pci.passthrough_whitelist %}
8243passthrough_whitelist = {{ item | json }}
8244 {%- endfor %}
8245{%- endif %}
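# A minimal pillar sketch (hypothetical values based on the whitelist
# examples above, assuming this formula's nova:compute pillar layout); each
# sriov and passthrough_whitelist entry is rendered as a JSON line by the
# loops above:
#
#   nova:
#     compute:
#       sriov:
#         nic_one:
#           devname: eth0
#           physical_network: physnet1
#       pci:
#         passthrough_whitelist:
#           - vendor_id: "1137"
#             product_id: "0071"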
8246
8247
Oleh Hryhorov5d0f13a2018-05-21 17:02:54 +00008248[placement]
8249
8250#
8251# From nova.conf
8252#
8253
8254# DEPRECATED:
8255# Region name of this node. This is used when picking the URL in the
8256# service
8257# catalog.
8258#
8259# Possible values:
8260#
8261# * Any string representing region name
8262# (string value)
8263# This option is deprecated for removal since 17.0.0.
8264# Its value may be silently ignored in the future.
8265# Reason: Endpoint lookup uses the service catalog via common
8266# keystoneauth1 Adapter configuration options. Use the region_name
8267# option instead.
8268os_region_name = {{ compute.identity.region }}
8269
8270# DEPRECATED:
8271# Endpoint interface for this node. This is used when picking the URL
8272# in the
8273# service catalog.
8274# (string value)
8275# This option is deprecated for removal since 17.0.0.
8276# Its value may be silently ignored in the future.
8277# Reason: Endpoint lookup uses the service catalog via common
8278# keystoneauth1 Adapter configuration options. Use the
8279# valid_interfaces option instead.
8280#os_interface = <None>
8281
8282#
8283# If True, when limiting allocation candidate results, the results
8284# will be
8285# a random sampling of the full result set. If False, allocation
8286# candidates
8287# are returned in a deterministic but undefined order. That is, all
8288# things
8289# being equal, two requests for allocation candidates will return the
8290# same
8291# results in the same order; but no guarantees are made as to how that
8292# order
8293# is determined.
8294# (boolean value)
8295#randomize_allocation_candidates = false
8296
8297# PEM encoded Certificate Authority to use when verifying HTTPS
8298# connections. (string value)
8299#cafile = <None>
8300{%- if compute.identity.get('protocol', 'http') == 'https' %}
8301cafile={{ compute.identity.get('cacert_file', compute.cacert_file) }}
8302{%- endif %}
8303
8304# PEM encoded client certificate cert file (string value)
8305#certfile = <None>
8306
8307# PEM encoded client certificate key file (string value)
8308#keyfile = <None>
8309
8310# Verify HTTPS connections. (boolean value)
8311#insecure = false
8312
8313# Timeout value for http requests (integer value)
8314#timeout = <None>
8315
8316# Authentication type to load (string value)
8317# Deprecated group/name - [placement]/auth_plugin
8318auth_type = password
8319
8320# Config Section from which to load plugin specific options (string
8321# value)
8322#auth_section = <None>
8323
8324# Authentication URL (string value)
8325#auth_url = <None>
8326auth_url={{ compute.identity.get('protocol', 'http') }}://{{ compute.identity.host }}:35357/v3
8327
8328# Scope for system operations (string value)
8329#system_scope = <None>
8330
8331# Domain ID to scope to (string value)
8332#domain_id = <None>
8333
8334# Domain name to scope to (string value)
8335#domain_name = <None>
8336
8337# Project ID to scope to (string value)
8338#project_id = <None>
8339
8340# Project name to scope to (string value)
8341project_name = {{ compute.identity.tenant }}
8342
8343# Domain ID containing project (string value)
8344project_domain_id = {{ compute.identity.get('domain', 'default') }}
8345
8346# Domain name containing project (string value)
8347#project_domain_name = <None>
8348
8349# Trust ID (string value)
8350#trust_id = <None>
8351
8352# Optional domain ID to use with v3 and v2 parameters. It will be used
8353# for both the user and project domain in v3 and ignored in v2
8354# authentication. (string value)
8355#default_domain_id = <None>
8356
8357# Optional domain name to use with v3 API and v2 parameters. It will
8358# be used for both the user and project domain in v3 and ignored in v2
8359# authentication. (string value)
8360#default_domain_name = <None>
8361
8362# User ID (string value)
8363#user_id = <None>
8364
8365# Username (string value)
8366# Deprecated group/name - [placement]/user_name
8367username = {{ compute.identity.user }}
8368
8369# User's domain id (string value)
8370user_domain_id = {{ compute.identity.get('domain', 'default') }}
8371
8372# User's domain name (string value)
8373#user_domain_name = <None>
8374
8375# User's password (string value)
8376password = {{ compute.identity.password }}
8377
8378# Tenant ID (string value)
8379#tenant_id = <None>
8380
8381# Tenant Name (string value)
8382#tenant_name = <None>
8383
8384# The default service_type for endpoint URL discovery. (string value)
8385#service_type = placement
8386
8387# The default service_name for endpoint URL discovery. (string value)
8388#service_name = <None>
8389
8390# List of interfaces, in order of preference, for endpoint URL. (list
8391# value)
8392# Deprecated group/name - [placement]/os_interface
8393valid_interfaces = internal
8394
8395# The default region_name for endpoint URL discovery. (string value)
8396# Deprecated group/name - [placement]/os_region_name
8397#region_name = <None>
8398
8399# Always use this endpoint URL for requests for this client. NOTE: The
8400# unversioned endpoint should be specified here; to request a
8401# particular API version, use the `version`, `min-version`, and/or
8402# `max-version` options. (string value)
8403#endpoint_override = <None>
8404
8405
8406[quota]
8407#
8408# Quota options allow you to manage quotas in an OpenStack deployment.
8409
8410#
8411# From nova.conf
8412#
8413
8414#
8415# The number of instances allowed per project.
8416#
8417# Possible values:
8418#
8419# * A positive integer or 0.
8420# * -1 to disable the quota.
8421# (integer value)
8422# Minimum value: -1
8423# Deprecated group/name - [DEFAULT]/quota_instances
8424#instances = 10
8425
8426#
8427# The number of instance cores or vCPUs allowed per project.
8428#
8429# Possible values:
8430#
8431# * A positive integer or 0.
8432# * -1 to disable the quota.
8433# (integer value)
8434# Minimum value: -1
8435# Deprecated group/name - [DEFAULT]/quota_cores
8436#cores = 20
8437
8438#
8439# The number of megabytes of instance RAM allowed per project.
8440#
8441# Possible values:
8442#
8443# * A positive integer or 0.
8444# * -1 to disable the quota.
8445# (integer value)
8446# Minimum value: -1
8447# Deprecated group/name - [DEFAULT]/quota_ram
8448#ram = 51200
8449
8450# DEPRECATED:
8451# The number of floating IPs allowed per project.
8452#
8453# Floating IPs are not allocated to instances by default. Users need
8454# to select
8455# them from the pool configured by the OpenStack administrator to
8456# attach to their
8457# instances.
8458#
8459# Possible values:
8460#
8461# * A positive integer or 0.
8462# * -1 to disable the quota.
8463# (integer value)
8464# Minimum value: -1
8465# Deprecated group/name - [DEFAULT]/quota_floating_ips
8466# This option is deprecated for removal since 15.0.0.
8467# Its value may be silently ignored in the future.
8468# Reason:
8469# nova-network is deprecated, as are any related configuration
8470# options.
8471#floating_ips = 10
8472
8473# DEPRECATED:
8474# The number of fixed IPs allowed per project.
8475#
8476# Unlike floating IPs, fixed IPs are allocated dynamically by the
8477# network
8478# component when instances boot up. This quota value should be at
8479# least the
8480# number of instances allowed.
8481#
8482# Possible values:
8483#
8484# * A positive integer or 0.
8485# * -1 to disable the quota.
8486# (integer value)
8487# Minimum value: -1
8488# Deprecated group/name - [DEFAULT]/quota_fixed_ips
8489# This option is deprecated for removal since 15.0.0.
8490# Its value may be silently ignored in the future.
8491# Reason:
8492# nova-network is deprecated, as are any related configuration
8493# options.
8494#fixed_ips = -1
8495
8496#
8497# The number of metadata items allowed per instance.
8498#
8499# Users can associate metadata with an instance during instance
8500# creation. This
8501# metadata takes the form of key-value pairs.
8502#
8503# Possible values:
8504#
8505# * A positive integer or 0.
8506# * -1 to disable the quota.
8507# (integer value)
8508# Minimum value: -1
8509# Deprecated group/name - [DEFAULT]/quota_metadata_items
8510#metadata_items = 128
8511
8512#
8513# The number of injected files allowed.
8514#
8515# File injection allows users to customize the personality of an
8516# instance by
8517# injecting data into it upon boot. Only text file injection is
8518# permitted: binary
8519# or ZIP files are not accepted. During file injection, any existing
8520# files that
8521# match specified files are renamed to include ``.bak`` extension
8522# appended with a
8523# timestamp.
8524#
8525# Possible values:
8526#
8527# * A positive integer or 0.
8528# * -1 to disable the quota.
8529# (integer value)
8530# Minimum value: -1
8531# Deprecated group/name - [DEFAULT]/quota_injected_files
8532#injected_files = 5
8533
8534#
8535# The number of bytes allowed per injected file.
8536#
8537# Possible values:
8538#
8539# * A positive integer or 0.
8540# * -1 to disable the quota.
8541# (integer value)
8542# Minimum value: -1
8543# Deprecated group/name - [DEFAULT]/quota_injected_file_content_bytes
8544#injected_file_content_bytes = 10240
8545
8546#
8547# The maximum allowed injected file path length.
8548#
8549# Possible values:
8550#
8551# * A positive integer or 0.
8552# * -1 to disable the quota.
8553# (integer value)
8554# Minimum value: -1
8555# Deprecated group/name - [DEFAULT]/quota_injected_file_path_length
8556#injected_file_path_length = 255
8557
8558# DEPRECATED:
8559# The number of security groups per project.
8560#
8561# Possible values:
8562#
8563# * A positive integer or 0.
8564# * -1 to disable the quota.
8565# (integer value)
8566# Minimum value: -1
8567# Deprecated group/name - [DEFAULT]/quota_security_groups
8568# This option is deprecated for removal since 15.0.0.
8569# Its value may be silently ignored in the future.
8570# Reason:
8571# nova-network is deprecated, as are any related configuration
8572# options.
8573#security_groups = 10
8574
8575# DEPRECATED:
8576# The number of security rules per security group.
8577#
8578# The associated rules in each security group control the traffic to
8579# instances in
8580# the group.
8581#
8582# Possible values:
8583#
8584# * A positive integer or 0.
8585# * -1 to disable the quota.
8586# (integer value)
8587# Minimum value: -1
8588# Deprecated group/name - [DEFAULT]/quota_security_group_rules
8589# This option is deprecated for removal since 15.0.0.
8590# Its value may be silently ignored in the future.
8591# Reason:
8592# nova-network is deprecated, as are any related configuration
8593# options.
8594#security_group_rules = 20
8595
8596#
8597# The maximum number of key pairs allowed per user.
8598#
8599# Users can create at least one key pair for each project and use the
8600# key pair
8601# for multiple instances that belong to that project.
8602#
8603# Possible values:
8604#
8605# * A positive integer or 0.
8606# * -1 to disable the quota.
8607# (integer value)
8608# Minimum value: -1
8609# Deprecated group/name - [DEFAULT]/quota_key_pairs
8610#key_pairs = 100
8611
8612#
8613# The maximum number of server groups per project.
8614#
8615# Server groups are used to control the affinity and anti-affinity
8616# scheduling
8617# policy for a group of servers or instances. Reducing the quota will
8618# not affect
8619# any existing group, but new servers will not be allowed into groups
8620# that have
8621# become over quota.
8622#
8623# Possible values:
8624#
8625# * A positive integer or 0.
8626# * -1 to disable the quota.
8627# (integer value)
8628# Minimum value: -1
8629# Deprecated group/name - [DEFAULT]/quota_server_groups
8630#server_groups = 10
8631
8632#
8633# The maximum number of servers per server group.
8634#
8635# Possible values:
8636#
8637# * A positive integer or 0.
8638# * -1 to disable the quota.
8639# (integer value)
8640# Minimum value: -1
8641# Deprecated group/name - [DEFAULT]/quota_server_group_members
8642#server_group_members = 10
8643
8644#
8645# The number of seconds until a reservation expires.
8646#
8647# This quota represents the time period for invalidating quota
8648# reservations.
8649# (integer value)
8650#reservation_expire = 86400
8651
8652#
8653# The count of reservations until usage is refreshed.
8654#
8655# This defaults to 0 (off) to avoid additional load but it is useful
8656# to turn on
8657# to help keep quota usage up-to-date and reduce the impact of out of
8658# sync usage
8659# issues.
8660# (integer value)
8661# Minimum value: 0
8662#until_refresh = 0
8663
8664#
8665# The number of seconds between subsequent usage refreshes.
8666#
8667# This defaults to 0 (off) to avoid additional load but it is useful
8668# to turn on
8669# to help keep quota usage up-to-date and reduce the impact of out of
8670# sync usage
8671# issues. Note that quotas are not updated on a periodic task, they
8672# will update
8673# on a new reservation if max_age has passed since the last
8674# reservation.
8675# (integer value)
8676# Minimum value: 0
8677#max_age = 0
8678
8679# DEPRECATED:
8680# The quota enforcer driver.
8681#
8682# Provides abstraction for quota checks. Users can configure a
8683# specific
8684# driver to use for quota checks.
8685#
8686# Possible values:
8687#
8688# * nova.quota.DbQuotaDriver (default) or any string representing
8689# fully
8690# qualified class name.
8691# (string value)
8692# Deprecated group/name - [DEFAULT]/quota_driver
8693# This option is deprecated for removal since 14.0.0.
8694# Its value may be silently ignored in the future.
8695#driver = nova.quota.DbQuotaDriver
8696
8697#
8698# Recheck quota after resource creation to prevent allowing quota to
8699# be exceeded.
8700#
8701# This defaults to True (recheck quota after resource creation) but
8702# can be set to
8703# False to avoid additional load if allowing quota to be exceeded
8704# because of
8705# racing requests is considered acceptable. For example, when set to
8706# False, if a
8707# user makes highly parallel REST API requests to create servers, it
8708# will be
8709# possible for them to create more servers than their allowed quota
8710# during the
8711# race. If their quota is 10 servers, they might be able to create 50
8712# during the
8713# burst. After the burst, they will not be able to create any more
8714# servers but
8715# they will be able to keep their 50 servers until they delete them.
8716#
8717# The initial quota check is done before resources are created, so if
8718# multiple
8719# parallel requests arrive at the same time, all could pass the quota
8720# check and
8721# create resources, potentially exceeding quota. When recheck_quota is
8722# True,
8723# quota will be checked a second time after resources have been
8724# created and if
8725# the resource is over quota, it will be deleted and OverQuota will be
8726# raised,
8727# usually resulting in a 403 response to the REST API user. This makes
8728# it
8729# impossible for a user to exceed their quota with the caveat that it
8730# will,
8731# however, be possible for a REST API user to be rejected with a 403
8732# response in
8733# the event of a collision close to reaching their quota limit, even
8734# if the user
8735# has enough quota available when they made the request.
8736# (boolean value)
8737#recheck_quota = true
8738
8739
8740[rdp]
8741#
8742# Options under this group enable and configure Remote Desktop
8743# Protocol (RDP) related features.
8745#
8746# This group is only relevant to Hyper-V users.
8747
8748#
8749# From nova.conf
8750#
8751
8752#
8753# Enable Remote Desktop Protocol (RDP) related features.
8754#
8755# Hyper-V, unlike the majority of the hypervisors employed on Nova
8756# compute
8757# nodes, uses RDP instead of VNC and SPICE as a desktop sharing
8758# protocol to
8759# provide instance console access. This option enables RDP for
8760# graphical
8761# console access for virtual machines created by Hyper-V.
8762#
8763# **Note:** RDP should only be enabled on compute nodes that support
8764# the Hyper-V
8765# virtualization platform.
8766#
8767# Related options:
8768#
8769# * ``compute_driver``: Must be hyperv.
8770#
8771# (boolean value)
8772#enabled = false
8773
8774#
8775# The URL an end user would use to connect to the RDP HTML5 console
8776# proxy.
8777# The console proxy service is called with this token-embedded URL and
8778# establishes the connection to the proper instance.
8779#
8780# An RDP HTML5 console proxy service will need to be configured to
8781# listen on the
8782# address configured here. Typically the console proxy service would
8783# be run on a
8784# controller node. The localhost address used as default would only
8785# work in a
8786# single-node environment, e.g. DevStack.
8787#
8788# An RDP HTML5 proxy allows a user to access via the web the text or
8789# graphical
8790# console of any Windows server or workstation using RDP. RDP HTML5
8791# console
8792# proxy services include FreeRDP, wsgate.
8793# See https://github.com/FreeRDP/FreeRDP-WebConnect
8794#
8795# Possible values:
8796#
8797# * <scheme>://<ip-address>:<port-number>/
8798#
8799# The scheme must be identical to the scheme configured for the RDP
8800# HTML5
8801# console proxy service. It is ``http`` or ``https``.
8802#
8803# The IP address must be identical to the address on which the RDP
8804# HTML5
8805# console proxy service is listening.
8806#
8807# The port must be identical to the port on which the RDP HTML5
8808# console proxy
8809# service is listening.
8810#
8811# Related options:
8812#
8813# * ``rdp.enabled``: Must be set to ``True`` for
8814# ``html5_proxy_base_url`` to be
8815# effective.
8816# (uri value)
8817#html5_proxy_base_url = http://127.0.0.1:6083/
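#
# Illustrative example only (scheme and hostname are assumptions): with
# a FreeRDP-WebConnect proxy running on a controller node, this might
# look like:
#
#enabled = true
#html5_proxy_base_url = https://rdp-proxy.example.com:6083/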
8818
8819
8820[remote_debug]
8821
8822#
8823# From nova.conf
8824#
8825
8826#
8827# Debug host (IP or name) to connect to. This command line parameter
8828# is used when
8829# you want to connect to a nova service via a debugger running on a
8830# different
8831# host.
8832#
8833# Note that using the remote debug option changes how Nova uses the
8834# eventlet
8835# library to support async IO. This could result in failures that do
8836# not occur
8837# under normal operation. Use at your own risk.
8838#
8839# Possible Values:
8840#
8841# * IP address of a remote host as a command line parameter
8842# to a nova service. For Example:
8843#
8844# /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf
8845# --remote_debug-host <IP address where the debugger is running>
8846# (unknown value)
8847#host = <None>
8848
8849#
8850# Debug port to connect to. This command line parameter allows you to
8851# specify
8852# the port you want to use to connect to a nova service via a debugger
8853# running
8854# on different host.
8855#
8856# Note that using the remote debug option changes how Nova uses the
8857# eventlet
8858# library to support async IO. This could result in failures that do
8859# not occur
8860# under normal operation. Use at your own risk.
8861#
8862# Possible Values:
8863#
8864# * Port number you want to use as a command line parameter
8865# to a nova service. For Example:
8866#
8867# /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf
8868# --remote_debug-host <IP address where the debugger is running>
8869# --remote_debug-port <port it's listening on>.
8870# (port value)
8871# Minimum value: 0
8872# Maximum value: 65535
8873#port = <None>
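#
# Illustrative example only (address and port are assumptions): to
# attach to a debugger listening on 192.0.2.10:5678, one might set:
#
#host = 192.0.2.10
#port = 5678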
8874
8875
8876[scheduler]
8877
8878#
8879# From nova.conf
8880#
8881
8882#
8883# The scheduler host manager to use.
8884#
8885# The host manager manages the in-memory picture of the hosts that the
8886# scheduler
8887# uses. The options values are chosen from the entry points under the
8888# namespace
8889# 'nova.scheduler.host_manager' in 'setup.cfg'.
8890#
8891# NOTE: The "ironic_host_manager" option is deprecated as of the
8892# 17.0.0 Queens
8893# release.
8894# (string value)
8895# Possible values:
8896# host_manager - <No description provided>
8897# ironic_host_manager - <No description provided>
8898# Deprecated group/name - [DEFAULT]/scheduler_host_manager
8899#host_manager = host_manager
8900
8901#
8902# The class of the driver used by the scheduler. This should be chosen
8903# from one
8904# of the entrypoints under the namespace 'nova.scheduler.driver' of
8905# file
8906# 'setup.cfg'. If nothing is specified in this option, the
8907# 'filter_scheduler' is
8908# used.
8909#
8910# Other options are:
8911#
8912# * 'caching_scheduler' which aggressively caches the system state for
8913# better
8914# individual scheduler performance at the risk of more retries when
8915# running
8916# multiple schedulers. [DEPRECATED]
8917# * 'chance_scheduler' which simply picks a host at random.
8918# [DEPRECATED]
8919# * 'fake_scheduler' which is used for testing.
8920#
8921# Possible values:
8922#
8923# * Any of the drivers included in Nova:
8924# ** filter_scheduler
8925# ** caching_scheduler
8926# ** chance_scheduler
8927# ** fake_scheduler
8928# * You may also set this to the entry point name of a custom
8929# scheduler driver,
8930# but you will be responsible for creating and maintaining it in
8931# your setup.cfg
8932# file.
8933# (string value)
8934# Deprecated group/name - [DEFAULT]/scheduler_driver
8935#driver = filter_scheduler
8936
8937#
8938# Periodic task interval.
8939#
8940# This value controls how often (in seconds) to run periodic tasks in
8941# the
8942# scheduler. The specific tasks that are run for each period are
8943# determined by
8944# the particular scheduler being used.
8945#
8946# If this is larger than the nova-service 'service_down_time' setting,
8947# Nova may
8948# report the scheduler service as down. This is because the scheduler
8949# driver is
8950# responsible for sending a heartbeat and it will only do that as
8951# often as this
8952# option allows. As each scheduler can work a little differently than
8953# the others,
8954# be sure to test this with your selected scheduler.
8955#
8956# Possible values:
8957#
8958# * An integer, where the integer corresponds to periodic task
8959# interval in
8960# seconds. 0 uses the default interval (60 seconds). A negative
8961# value disables
8962# periodic tasks.
8963#
8964# Related options:
8965#
8966# * ``nova-service service_down_time``
8967# (integer value)
8968# Deprecated group/name - [DEFAULT]/scheduler_driver_task_period
8969#periodic_task_interval = 60
8970
8971#
8972# This is the maximum number of attempts that will be made for a given
8973# instance
8974# build/move operation. It limits the number of alternate hosts
8975# returned by the
8976# scheduler. When that list of hosts is exhausted, a
8977# MaxRetriesExceeded
8978# exception is raised and the instance is set to an error state.
8979#
8980# Possible values:
8981#
8982# * A positive integer, where the integer corresponds to the max
8983# number of
8984# attempts that can be made when building or moving an instance.
8985# (integer value)
8986# Minimum value: 1
8987# Deprecated group/name - [DEFAULT]/scheduler_max_attempts
8988#max_attempts = 3
8989
8990#
8991# Periodic task interval.
8992#
8993# This value controls how often (in seconds) the scheduler should
8994# attempt
8995# to discover new hosts that have been added to cells. If negative
8996# (the
8997# default), no automatic discovery will occur.
8998#
8999# Deployments where compute nodes come and go frequently may want this
9000# enabled, where others may prefer to manually discover hosts when one
9001# is added to avoid any overhead from constantly checking. If enabled,
9002# every time this runs, we will select any unmapped hosts out of each
9003# cell database on every run.
9004# (integer value)
9005# Minimum value: -1
9006#discover_hosts_in_cells_interval = -1
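#
# Illustrative example only: deployments that add compute nodes often
# could enable automatic discovery every five minutes:
#
#discover_hosts_in_cells_interval = 300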
9007
9008#
9009# This setting determines the maximum limit on results received from
9010# the
9011# placement service during a scheduling operation. It effectively
9012# limits
9013# the number of hosts that may be considered for scheduling requests
9014# that
9015# match a large number of candidates.
9016#
9017# A value of 1 (the minimum) will effectively defer scheduling to the
9018# placement
9019# service strictly on "will it fit" grounds. A higher value will put
9020# an upper
9021# cap on the number of results the scheduler will consider during the
9022# filtering
9023# and weighing process. Large deployments may need to set this lower
9024# than the
9025# total number of hosts available to limit memory consumption, network
9026# traffic,
9027# etc. of the scheduler.
9028#
9029# This option is only used by the FilterScheduler; if you use a
9030# different
9031# scheduler, this option has no effect.
9032# (integer value)
9033# Minimum value: 1
9034#max_placement_results = 1000
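#
# Illustrative example only (values are assumptions): a large
# deployment using the default filter scheduler might cap placement
# results and allow a couple of extra retries:
#
#driver = filter_scheduler
#max_attempts = 5
#max_placement_results = 500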
9035
9036
9037[serial_console]
9038#
9039# The serial console feature allows you to connect to a guest in case
9040# a
9041# graphical console like VNC, RDP or SPICE is not available. This is
9042# only
9043# currently supported for the libvirt, Ironic and Hyper-V drivers.
9044
9045#
9046# From nova.conf
9047#
9048
9049#
9050# Enable the serial console feature.
9051#
9052# In order to use this feature, the service ``nova-serialproxy`` needs
9053# to run.
9054# This service is typically executed on the controller node.
9055# (boolean value)
9056#enabled = false
9057
9058#
9059# A range of TCP ports a guest can use for its backend.
9060#
9061# Each instance which gets created will use one port out of this
9062# range. If the
9063# range is not big enough to provide another port for a new instance,
9064# this
9065# instance won't get launched.
9066#
9067# Possible values:
9068#
9069# * Each string which passes the regex ``\d+:\d+``. For example
9070# ``10000:20000``.
9071# Be sure that the first port number is lower than the second port
9072# number
9073# and that both are in range from 0 to 65535.
9074# (string value)
9075#port_range = 10000:20000
9076
9077#
9078# The URL an end user would use to connect to the ``nova-serialproxy``
9079# service.
9080#
9081# The ``nova-serialproxy`` service is called with this token enriched
9082# URL
9083# and establishes the connection to the proper instance.
9084#
9085# Related options:
9086#
9087# * The IP address must be identical to the address to which the
9088# ``nova-serialproxy`` service is listening (see option
9089# ``serialproxy_host``
9090# in this section).
9091# * The port must be the same as in the option ``serialproxy_port`` of
9092# this
9093# section.
9094# * If you choose to use a secured websocket connection, then start
9095# this option
9096# with ``wss://`` instead of the unsecured ``ws://``. The options
9097# ``cert``
9098# and ``key`` in the ``[DEFAULT]`` section have to be set for that.
9099# (uri value)
9100#base_url = ws://127.0.0.1:6083/
9101
9102#
9103# The IP address to which proxy clients (like ``nova-serialproxy``)
9104# should
9105# connect to get the serial console of an instance.
9106#
9107# This is typically the IP address of the host of a ``nova-compute``
9108# service.
9109# (string value)
9110#proxyclient_address = 127.0.0.1
9111
9112#
9113# The IP address which is used by the ``nova-serialproxy`` service to
9114# listen
9115# for incoming requests.
9116#
9117# The ``nova-serialproxy`` service listens on this IP address for
9118# incoming
9119# connection requests to instances which expose serial console.
9120#
9121# Related options:
9122#
9123# * Ensure that this is the same IP address which is defined in the
9124# option
9125# ``base_url`` of this section or use ``0.0.0.0`` to listen on all
9126# addresses.
9127# (string value)
9128#serialproxy_host = 0.0.0.0
9129
9130#
9131# The port number which is used by the ``nova-serialproxy`` service to
9132# listen
9133# for incoming requests.
9134#
9135# The ``nova-serialproxy`` service listens on this port number for
9136# incoming
9137# connection requests to instances which expose serial console.
9138#
9139# Related options:
9140#
9141# * Ensure that this is the same port number which is defined in the
9142# option
9143# ``base_url`` of this section.
9144# (port value)
9145# Minimum value: 0
9146# Maximum value: 65535
9147#serialproxy_port = 6083
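#
# Illustrative example only (addresses are assumptions): enabling the
# serial console with the proxy on a controller reachable as
# ctrl.example.com and a compute node at 192.0.2.21 might look like:
#
#enabled = true
#port_range = 10000:20000
#base_url = ws://ctrl.example.com:6083/
#proxyclient_address = 192.0.2.21
#serialproxy_host = 0.0.0.0
#serialproxy_port = 6083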
9148
9149
9150[service_user]
9151#
9152# Configuration options for service to service authentication using a
9153# service
9154# token. These options allow sending a service token along with the
9155# user's token
9156# when contacting external REST APIs.
9157
9158#
9159# From nova.conf
9160#
9161
9162#
9163# When True, if sending a user token to a REST API, also send a
9164# service token.
9165#
9166# Nova often reuses the user token provided to the nova-api to talk to
9167# other REST
9168# APIs, such as Cinder, Glance and Neutron. It is possible that while
9169# the user
9170# token was valid when the request was made to Nova, the token may
9171# expire before
9172# it reaches the other service. To avoid any failures, and to make it
9173# clear it is
9174# Nova calling the service on the user's behalf, we include a service
9175# token along
9176# with the user token. Should the user's token have expired, a valid
9177# service
9178# token ensures the REST API request will still be accepted by the
9179# keystone
9180# middleware.
9181# (boolean value)
9182#send_service_user_token = false
9183{%- if compute.get('service_user', {}).get('enabled', True) %}
9184send_service_user_token = True
9185{%- set _data = {} %}
9186{%- do _data.update(compute.get('identity', {})) %}
9187{%- do _data.update(compute.get('service_user', {})) %}
9188{%- if not _data.port == '5000' %}{% do _data.update({'port': '5000'}) %}{% endif %}
9189{%- if 'cacert_file' not in _data.keys() %}{% do _data.update({'cacert_file': compute.cacert_file}) %}{% endif %}
9190{%- include "oslo_templates/files/queens/keystoneauth/_type_"+ _data.get('auth_type','password') +".conf" %}
9191{%- else %}
9192
9193# PEM encoded Certificate Authority to use when verifying HTTPs
9194# connections. (string value)
9195#cafile = <None>
9196
9197# PEM encoded client certificate cert file (string value)
9198#certfile = <None>
9199
9200# PEM encoded client certificate key file (string value)
9201#keyfile = <None>
9202
9203# Verify HTTPS connections. (boolean value)
9204#insecure = false
9205
9206# Timeout value for http requests (integer value)
9207#timeout = <None>
9208
9209# Authentication type to load (string value)
9210# Deprecated group/name - [service_user]/auth_plugin
9211#auth_type = <None>
9212
9213# Config Section from which to load plugin specific options (string
9214# value)
9215#auth_section = <None>
9216
9217# Authentication URL (string value)
9218#auth_url = <None>
9219
9220# Scope for system operations (string value)
9221#system_scope = <None>
9222
9223# Domain ID to scope to (string value)
9224#domain_id = <None>
9225
9226# Domain name to scope to (string value)
9227#domain_name = <None>
9228
9229# Project ID to scope to (string value)
9230#project_id = <None>
9231
9232# Project name to scope to (string value)
9233#project_name = <None>
9234
9235# Domain ID containing project (string value)
9236#project_domain_id = <None>
9237
9238# Domain name containing project (string value)
9239#project_domain_name = <None>
9240
9241# Trust ID (string value)
9242#trust_id = <None>
9243
9244# Optional domain ID to use with v3 and v2 parameters. It will be used
9245# for both the user and project domain in v3 and ignored in v2
9246# authentication. (string value)
9247#default_domain_id = <None>
9248
9249# Optional domain name to use with v3 API and v2 parameters. It will
9250# be used for both the user and project domain in v3 and ignored in v2
9251# authentication. (string value)
9252#default_domain_name = <None>
9253
9254# User ID (string value)
9255#user_id = <None>
9256
9257# Username (string value)
9258# Deprecated group/name - [service_user]/user_name
9259#username = <None>
9260
9261# User's domain id (string value)
9262#user_domain_id = <None>
9263
9264# User's domain name (string value)
9265#user_domain_name = <None>
9266
9267# User's password (string value)
9268#password = <None>
9269
9270# Tenant ID (string value)
9271#tenant_id = <None>
9272
9273# Tenant Name (string value)
9274#tenant_name = <None>
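#
# Illustrative example only (endpoint and credentials are assumptions):
# a password-based service user would typically be configured along
# these lines:
#
#send_service_user_token = true
#auth_type = password
#auth_url = http://keystone.example.com:5000/v3
#username = nova
#password = <service user password>
#user_domain_name = Default
#project_name = service
#project_domain_name = Default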
9275{%- endif %}
9276
9277
9278[spice]
9279#
9280# SPICE console feature allows you to connect to a guest virtual
9281# machine.
9282# SPICE is a replacement for the fairly limited VNC protocol.
9283#
9284# Following requirements must be met in order to use SPICE:
9285#
9286# * Virtualization driver must be libvirt
9287# * spice.enabled set to True
9288# * vnc.enabled set to False
9289# * update html5proxy_base_url
9290# * update server_proxyclient_address
9291
9292#
9293# From nova.conf
9294#
9295
9296#
9297# Enable SPICE related features.
9298#
9299# Related options:
9300#
9301# * VNC must be explicitly disabled to get access to the SPICE
9302# console. Set the
9303# enabled option to False in the [vnc] section to disable the VNC
9304# console.
9305# (boolean value)
9306#enabled = false
9307enabled = false
9308#
9309# Enable the SPICE guest agent support on the instances.
9310#
9311# The Spice agent works with the Spice protocol to offer a better
9312# guest console
9313# experience. However, the Spice console can still be used without the
9314# Spice
9315# Agent. With the Spice agent installed the following features are
9316# enabled:
9317#
9318# * Copy & Paste of text and images between the guest and client
9319# machine
9320# * Automatic adjustment of resolution when the client screen changes
9321# - e.g.
9322# if you make the Spice console full screen the guest resolution
9323# will adjust to
9324# match it rather than letterboxing.
9325# * Better mouse integration - The mouse can be captured and released
9326# without
9327# needing to click inside the console or press keys to release it.
9328# The
9329# performance of mouse movement is also improved.
9330# (boolean value)
9331#agent_enabled = true
9332
9333#
9334# Location of the SPICE HTML5 console proxy.
9335#
9336# End users would use this URL to connect to the
9337# ``nova-spicehtml5proxy`` service. This service will forward the
9338# request to the console of an
9339# instance.
9340#
9341# In order to use SPICE console, the service ``nova-spicehtml5proxy``
9342# should be
9343# running. This service is typically launched on the controller node.
9344#
9345# Possible values:
9346#
9347# * Must be a valid URL of the form:
9348# ``http://host:port/spice_auto.html``
9349# where host is the node running ``nova-spicehtml5proxy`` and the
9350# port is
9351# typically 6082. Consider not using default value as it is not well
9352# defined
9353# for any real deployment.
9354#
9355# Related options:
9356#
9357# * This option depends on ``html5proxy_host`` and ``html5proxy_port``
9358# options.
9359# The access URL returned by the compute node must have the host
9360# and port where the ``nova-spicehtml5proxy`` service is listening.
9361# (uri value)
9362#html5proxy_base_url = http://127.0.0.1:6082/spice_auto.html
9363{%- if compute.vncproxy_url is defined %}
9364html5proxy_base_url = {{ compute.vncproxy_url }}/spice_auto.html
9365{%- endif %}
9366
9367#
9368# The address where the SPICE server running on the instances should
9369# listen.
9370#
9371# Typically, the ``nova-spicehtml5proxy`` proxy client runs on the
9372# controller
9373# node and connects over the private network to this address on the
9374# compute
9375# node(s).
9376#
9377# Possible values:
9378#
9379# * IP address to listen on.
9380# (string value)
9381#server_listen = 127.0.0.1
9382
9383#
9384# The address used by ``nova-spicehtml5proxy`` client to connect to
9385# instance
9386# console.
9387#
9388# Typically, the ``nova-spicehtml5proxy`` proxy client runs on the
9389# controller node and connects over the private network to this
9390# address on the
9391# compute node(s).
9392#
9393# Possible values:
9394#
9395# * Any valid IP address on the compute node.
9396#
9397# Related options:
9398#
9399# * This option depends on the ``server_listen`` option.
9400# The proxy client must be able to access the address specified in
9401# ``server_listen`` using the value of this option.
9402# (string value)
9403#server_proxyclient_address = 127.0.0.1
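#
# Illustrative example only (address is an assumption): on a compute
# node whose private address is 192.0.2.31, one might set:
#
#server_listen = 0.0.0.0
#server_proxyclient_address = 192.0.2.31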
9404
9405#
9406# A keyboard layout which is supported by the underlying hypervisor on
9407# this
9408# node.
9409#
9410# Possible values:
9411# * This is usually an 'IETF language tag' (default is 'en-us'). If
9412# you
9413# use QEMU as hypervisor, you should find the list of supported
9414# keyboard
9415# layouts at /usr/share/qemu/keymaps.
9416# (string value)
9417#keymap = en-us
9418
9419#
9420# IP address or a hostname on which the ``nova-spicehtml5proxy``
9421# service
9422# listens for incoming requests.
9423#
9424# Related options:
9425#
9426# * This option depends on the ``html5proxy_base_url`` option.
9427# The ``nova-spicehtml5proxy`` service must be listening on a host
9428# that is
9429# accessible from the HTML5 client.
9430# (unknown value)
9431#html5proxy_host = 0.0.0.0
9432
9433#
9434# Port on which the ``nova-spicehtml5proxy`` service listens for
9435# incoming
9436# requests.
9437#
9438# Related options:
9439#
9440# * This option depends on the ``html5proxy_base_url`` option.
9441# The ``nova-spicehtml5proxy`` service must be listening on a port
9442# that is
9443# accessible from the HTML5 client.
9444# (port value)
9445# Minimum value: 0
9446# Maximum value: 65535
9447#html5proxy_port = 6082
9448
9449
9450[upgrade_levels]
9451
9452{%- if compute.upgrade_levels is defined %}
9453{%- for key, value in compute.upgrade_levels.items() %}
9454{{ key }}={{ value }}
9455{%- endfor %}
9456{%- endif %}
9457#
9458# upgrade_levels options are used to set version cap for RPC
9459# messages sent between different nova services.
9460#
9461# By default all services send messages using the latest version
9462# they know about.
9463#
9464# The compute upgrade level is an important part of rolling upgrades
9465# where old and new nova-compute services run side by side.
9466#
9467# The other options can largely be ignored, and are only kept to
9468# help with a possible future backport issue.
9469
9470#
9471# From nova.conf
9472#
9473
9474#
9475# Compute RPC API version cap.
9476#
9477# By default, we always send messages using the most recent version
9478# the client knows about.
9479#
9480# Where you have old and new compute services running, you should set
9481# this to the lowest deployed version. This is to guarantee that all
9482# services never send messages that one of the compute nodes can't
9483# understand. Note that we only support upgrading from release N to
9484# release N+1.
9485#
9486# Set this option to "auto" if you want to let the compute RPC module
9487# automatically determine what version to use based on the service
9488# versions in the deployment.
9489#
9490# Possible values:
9491#
9492# * By default send the latest version the client knows about
9493# * 'auto': Automatically determines what version to use based on
9494# the service versions in the deployment.
9495# * A string representing a version number in the format 'N.N';
9496# for example, possible values might be '1.12' or '2.0'.
9497# * An OpenStack release name, in lower case, such as 'mitaka' or
9498# 'liberty'.
9499# (string value)
9500#compute = <None>
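#
# Illustrative example only: during a rolling upgrade the cap could be
# pinned to the older release name (for example 'pike'), or set to
# 'auto' to let the RPC module pick a version from the service records:
#
#compute = auto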
9501
9502# Cells RPC API version cap (string value)
9503#cells = <None>
9504
9505# Intercell RPC API version cap (string value)
9506#intercell = <None>
9507
9508# Cert RPC API version cap (string value)
9509#cert = <None>
9510
9511# Scheduler RPC API version cap (string value)
9512#scheduler = <None>
9513
9514# Conductor RPC API version cap (string value)
9515#conductor = <None>
9516
9517# Console RPC API version cap (string value)
9518#console = <None>
9519
9520# Consoleauth RPC API version cap (string value)
9521#consoleauth = <None>
9522
9523# Network RPC API version cap (string value)
9524#network = <None>
9525
9526# Base API RPC API version cap (string value)
9527#baseapi = <None>
9528
9529
9530[vault]
9531
9532#
9533# From nova.conf
9534#
9535
9536# root token for vault (string value)
9537#root_token_id = <None>
9538
9539# Use this endpoint to connect to Vault, for example:
9540# "http://127.0.0.1:8200" (string value)
9541#vault_url = http://127.0.0.1:8200
9542
9543# Absolute path to ca cert file (string value)
9544#ssl_ca_crt_file = <None>
9545
9546# SSL Enabled/Disabled (boolean value)
9547#use_ssl = false
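#
# Illustrative example only (URL and token are assumptions): pointing
# the key manager at a local Vault server could look like:
#
#vault_url = http://127.0.0.1:8200
#root_token_id = <vault root token>
#use_ssl = false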
9548
9549
9550[vendordata_dynamic_auth]
9551#
9552# Options within this group control the authentication of the
9553# vendordata
9554# subsystem of the metadata API server (and config drive) with
9555# external systems.
9556
9557#
9558# From nova.conf
9559#
9560
9561# PEM encoded Certificate Authority to use when verifying HTTPs
9562# connections. (string value)
9563#cafile = <None>
9564
9565# PEM encoded client certificate cert file (string value)
9566#certfile = <None>
9567
9568# PEM encoded client certificate key file (string value)
9569#keyfile = <None>
9570
9571# Verify HTTPS connections. (boolean value)
9572#insecure = false
9573
9574# Timeout value for http requests (integer value)
9575#timeout = <None>
9576
9577# Authentication type to load (string value)
9578# Deprecated group/name - [vendordata_dynamic_auth]/auth_plugin
9579#auth_type = <None>
9580
9581# Config Section from which to load plugin specific options (string
9582# value)
9583#auth_section = <None>
9584
9585# Authentication URL (string value)
9586#auth_url = <None>
9587
9588# Scope for system operations (string value)
9589#system_scope = <None>
9590
9591# Domain ID to scope to (string value)
9592#domain_id = <None>
9593
9594# Domain name to scope to (string value)
9595#domain_name = <None>
9596
9597# Project ID to scope to (string value)
9598#project_id = <None>
9599
9600# Project name to scope to (string value)
9601#project_name = <None>
9602
9603# Domain ID containing project (string value)
9604#project_domain_id = <None>
9605
9606# Domain name containing project (string value)
9607#project_domain_name = <None>
9608
9609# Trust ID (string value)
9610#trust_id = <None>
9611
9612# Optional domain ID to use with v3 and v2 parameters. It will be used
9613# for both the user and project domain in v3 and ignored in v2
9614# authentication. (string value)
9615#default_domain_id = <None>
9616
9617# Optional domain name to use with v3 API and v2 parameters. It will
9618# be used for both the user and project domain in v3 and ignored in v2
9619# authentication. (string value)
9620#default_domain_name = <None>
9621
9622# User ID (string value)
9623#user_id = <None>
9624
9625# Username (string value)
9626# Deprecated group/name - [vendordata_dynamic_auth]/user_name
9627#username = <None>
9628
9629# User's domain id (string value)
9630#user_domain_id = <None>
9631
9632# User's domain name (string value)
9633#user_domain_name = <None>
9634
9635# User's password (string value)
9636#password = <None>
9637
9638# Tenant ID (string value)
9639#tenant_id = <None>
9640
9641# Tenant Name (string value)
9642#tenant_name = <None>
9643
9644{%- set compute_driver = compute.get('compute_driver', 'libvirt.LibvirtDriver') %}
9645{%- if compute_driver in compute_driver_mapping.keys() %}
9646{%- set _data = compute.get(compute_driver_mapping[compute_driver]) %}
9647{%- include "nova/files/queens/compute/_" + compute_driver_mapping[compute_driver] + ".conf" %}
9648{%- endif %}
9649
9650[vnc]
9651#
9652# Virtual Network Computer (VNC) can be used to provide remote desktop
9653# console access to instances for tenants and/or administrators.
9654
9655#
9656# From nova.conf
9657#
9658
9659#
9660# Enable VNC related features.
9661#
9662# Guests will get created with graphical devices to support this.
9663# Clients
9664# (for example Horizon) can then establish a VNC connection to the
9665# guest.
9666# (boolean value)
9667# Deprecated group/name - [DEFAULT]/vnc_enabled
9668enabled = true
9669
9670{%- if compute.vncproxy_url is defined %}
9671novncproxy_base_url={{ compute.vncproxy_url }}/vnc_auto.html
9672{%- endif %}
9673{%- if compute.bind is defined and compute.bind.vnc_port is defined %}
9674novncproxy_port={{ compute.bind.vnc_port }}
9675{%- endif %}
9676{%- if compute.bind is defined %}
9677{%- if compute.bind.vnc_address is defined %}
9678vncserver_listen={{ compute.bind.vnc_address }}
9679vncserver_proxyclient_address={{ compute.bind.vnc_address }}
9680{%- else %}
9681vncserver_listen=0.0.0.0
9682{%- endif %}
9683{%- endif %}
9684
9685#
9686# Keymap for VNC.
9687#
9688# The keyboard mapping (keymap) determines which keyboard layout a VNC
9689# session should use by default.
9690#
9691# Possible values:
9692#
9693# * A keyboard layout which is supported by the underlying hypervisor
9694# on
9695# this node. This is usually an 'IETF language tag' (for example
9696# 'en-us'). If you use QEMU as hypervisor, you should find the
9697# list
9698# of supported keyboard layouts at ``/usr/share/qemu/keymaps``.
9699# (string value)
9700# Deprecated group/name - [DEFAULT]/vnc_keymap
9701keymap = {{ compute.get('vnc_keymap', 'en-us') }}
9702
9703#
9704# The IP address or hostname on which an instance should listen to for
9705# incoming VNC connection requests on this node.
9706# (unknown value)
9707# Deprecated group/name - [DEFAULT]/vncserver_listen
9708# Deprecated group/name - [vnc]/vncserver_listen
9709#server_listen = 127.0.0.1
9710
9711#
9712# Private, internal IP address or hostname of VNC console proxy.
9713#
9714# The VNC proxy is an OpenStack component that enables compute service
9715# users to access their instances through VNC clients.
9716#
9717# This option sets the private address to which proxy clients, such as
9718# ``nova-xvpvncproxy``, should connect.
9719# (unknown value)
9720# Deprecated group/name - [DEFAULT]/vncserver_proxyclient_address
9721# Deprecated group/name - [vnc]/vncserver_proxyclient_address
9722#server_proxyclient_address = 127.0.0.1
9723
9724#
9725# Public address of noVNC VNC console proxy.
9726#
9727# The VNC proxy is an OpenStack component that enables compute service
9728# users to access their instances through VNC clients. noVNC provides
9729# VNC support through a websocket-based client.
9730#
9731# This option sets the public base URL to which client systems will
9732# connect. noVNC clients can use this address to connect to the noVNC
9733# instance and, by extension, the VNC sessions.
9734#
9735# Related options:
9736#
9737# * novncproxy_host
9738# * novncproxy_port
9739# (uri value)
9740#novncproxy_base_url = http://127.0.0.1:6080/vnc_auto.html
9741
9742#
9743# IP address or hostname that the XVP VNC console proxy should bind
9744# to.
9745#
9746# The VNC proxy is an OpenStack component that enables compute service
9747# users to access their instances through VNC clients. Xen provides
9748# the Xenserver VNC Proxy, or XVP, as an alternative to the
9749# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
9750# XVP clients are Java-based.
9751#
9752# This option sets the private address to which the XVP VNC console
9753# proxy
9754# service should bind to.
9755#
9756# Related options:
9757#
9758# * xvpvncproxy_port
9759# * xvpvncproxy_base_url
9760# (unknown value)
9761#xvpvncproxy_host = 0.0.0.0
9762
9763#
9764# Port that the XVP VNC console proxy should bind to.
9765#
9766# The VNC proxy is an OpenStack component that enables compute service
9767# users to access their instances through VNC clients. Xen provides
9768# the Xenserver VNC Proxy, or XVP, as an alternative to the
9769# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
9770# XVP clients are Java-based.
9771#
9772# This option sets the private port to which the XVP VNC console proxy
9773# service should bind to.
9774#
9775# Related options:
9776#
9777# * xvpvncproxy_host
9778# * xvpvncproxy_base_url
9779# (port value)
9780# Minimum value: 0
9781# Maximum value: 65535
9782#xvpvncproxy_port = 6081
9783
9784#
9785# Public URL address of XVP VNC console proxy.
9786#
9787# The VNC proxy is an OpenStack component that enables compute service
9788# users to access their instances through VNC clients. Xen provides
9789# the Xenserver VNC Proxy, or XVP, as an alternative to the
9790# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
9791# XVP clients are Java-based.
9792#
9793# This option sets the public base URL to which client systems will
9794# connect. XVP clients can use this address to connect to the XVP
9795# instance and, by extension, the VNC sessions.
9796#
9797# Related options:
9798#
9799# * xvpvncproxy_host
9800# * xvpvncproxy_port
9801# (uri value)
9802#xvpvncproxy_base_url = http://127.0.0.1:6081/console
9803
9804#
9805# IP address that the noVNC console proxy should bind to.
9806#
9807# The VNC proxy is an OpenStack component that enables compute service
9808# users to access their instances through VNC clients. noVNC provides
9809# VNC support through a websocket-based client.
9810#
9811# This option sets the private address to which the noVNC console
9812# proxy
9813# service should bind to.
9814#
9815# Related options:
9816#
9817# * novncproxy_port
9818# * novncproxy_base_url
9819# (string value)
9820#novncproxy_host = 0.0.0.0
9821
9822#
9823# Port that the noVNC console proxy should bind to.
9824#
9825# The VNC proxy is an OpenStack component that enables compute service
9826# users to access their instances through VNC clients. noVNC provides
9827# VNC support through a websocket-based client.
9828#
9829# This option sets the private port to which the noVNC console proxy
9830# service should bind to.
9831#
9832# Related options:
9833#
9834# * novncproxy_host
9835# * novncproxy_base_url
9836# (port value)
9837# Minimum value: 0
9838# Maximum value: 65535
9839#novncproxy_port = 6080
9840
9841#
9842# The authentication schemes to use with the compute node.
9843#
9844# Control what RFB authentication schemes are permitted for
9845# connections between
9846# the proxy and the compute host. If multiple schemes are enabled, the
9847# first
9848# matching scheme will be used, thus the strongest schemes should be
9849# listed
9850# first.
9851#
9852# Possible values:
9853#
9854# * ``none``: allow connection without authentication
9855# * ``vencrypt``: use VeNCrypt authentication scheme
9856#
9857# Related options:
9858#
9859# * ``[vnc]vencrypt_client_key``, ``[vnc]vencrypt_client_cert``: must
9860# also be set
9861# (list value)
9862#auth_schemes = none
9863
9864# The path to the client certificate PEM file (for x509)
9865#
9866# The fully qualified path to a PEM file containing the private key
9867# which the VNC
9868# proxy server presents to the compute node during VNC authentication.
9869#
9870# Related options:
9871#
9872# * ``vnc.auth_schemes``: must include ``vencrypt``
9873# * ``vnc.vencrypt_client_cert``: must also be set
9874# (string value)
9875#vencrypt_client_key = <None>
9876
9877# The path to the client key file (for x509)
9878#
9879# The fully qualified path to a PEM file containing the x509
9880# certificate which
9881# the VNC proxy server presents to the compute node during VNC
9882# authentication.
9883#
9884# Related options:
9885#
9886# * ``vnc.auth_schemes``: must include ``vencrypt``
9887# * ``vnc.vencrypt_client_key``: must also be set
9888# (string value)
9889#vencrypt_client_cert = <None>
9890
9891# The path to the CA certificate PEM file
9892#
9893# The fully qualified path to a PEM file containing one or more x509
9894# certificates
9895# for the certificate authorities used by the compute node VNC server.
9896#
9897# Related options:
9898#
9899# * ``vnc.auth_schemes``: must include ``vencrypt``
9900# (string value)
9901#vencrypt_ca_certs = <None>
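#
# Illustrative example only (paths are assumptions): to require
# VeNCrypt between the proxy and the compute nodes, one might set:
#
#auth_schemes = vencrypt,none
#vencrypt_client_key = /etc/pki/nova-novncproxy/client-key.pem
#vencrypt_client_cert = /etc/pki/nova-novncproxy/client-cert.pem
#vencrypt_ca_certs = /etc/pki/nova-novncproxy/ca-cert.pem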
9902
9903
9904[workarounds]
9905#
9906# A collection of workarounds used to mitigate bugs or issues found in
9907# system
9908# tools (e.g. Libvirt or QEMU) or Nova itself under certain
9909# conditions. These
9910# should only be enabled in exceptional circumstances. All options are
9911# linked
9912# against bug IDs, where more information on the issue can be found.
9913
9914#
9915# From nova.conf
9916#
9917
9918#
9919# Use sudo instead of rootwrap.
9920#
9921# Allow fallback to sudo for performance reasons.
9922#
9923# For more information, refer to the bug report:
9924#
9925# https://bugs.launchpad.net/nova/+bug/1415106
9926#
9927# Possible values:
9928#
9929# * True: Use sudo instead of rootwrap
9930# * False: Use rootwrap as usual
9931#
9932# Interdependencies to other options:
9933#
9934# * Any options that affect 'rootwrap' will be ignored.
9935# (boolean value)
9936#disable_rootwrap = false
9937
9938#
9939# Disable live snapshots when using the libvirt driver.
9940#
9941# Live snapshots allow the snapshot of the disk to happen without an
9942# interruption to the guest, using coordination with a guest agent to
9943# quiesce the filesystem.
9944#
9945# When using libvirt 1.2.2 live snapshots fail intermittently under
9946# load
9947# (likely related to concurrent libvirt/qemu operations). This config
9948# option provides a mechanism to disable live snapshot, in favor of
9949# cold
9950# snapshot, while this is resolved. Cold snapshot causes an instance
9951# outage while the guest is going through the snapshotting process.
9952#
9953# For more information, refer to the bug report:
9954#
9955# https://bugs.launchpad.net/nova/+bug/1334398
9956#
9957# Possible values:
9958#
9959# * True: Live snapshot is disabled when using libvirt
9960# * False: Live snapshots are always used when snapshotting (as long
9961# as
9962# there is a new enough libvirt and the backend storage supports it)
9963# (boolean value)
9964#disable_libvirt_livesnapshot = false
9965disable_libvirt_livesnapshot = {{ compute.get('workaround', {}).get('disable_libvirt_livesnapshot', True)|lower }}
9966
9967#
9968# Enable handling of events emitted from compute drivers.
9969#
9970# Many compute drivers emit lifecycle events, which are events that
9971# occur when,
9972# for example, an instance is starting or stopping. If the instance is
9973# going
9974# through task state changes due to an API operation, like resize, the
9975# events
9976# are ignored.
9977#
9978# This is an advanced feature which allows the hypervisor to signal to
9979# the
9980# compute service that an unexpected state change has occurred in an
9981# instance
9982# and that the instance can be shutdown automatically. Unfortunately,
9983# this can
9984# race in some conditions, for example in reboot operations or when
9985# the compute service or the host is rebooted (planned or due to an
9986# outage). If
9987# such races
9988# are common, then it is advisable to disable this feature.
9989#
9990# Care should be taken when this feature is disabled and
9991# 'sync_power_state_interval' is set to a negative value. In this
9992# case, any
9993# instances that get out of sync between the hypervisor and the Nova
9994# database
9995# will have to be synchronized manually.
9996#
9997# For more information, refer to the bug report:
9998#
9999# https://bugs.launchpad.net/bugs/1444630
10000#
10001# Interdependencies to other options:
10002#
10003# * If ``sync_power_state_interval`` is negative and this feature is
10004# disabled,
10005# then instances that get out of sync between the hypervisor and the
10006# Nova
10007# database will have to be synchronized manually.
10008# (boolean value)
10009#handle_virt_lifecycle_events = true
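#
# Illustrative example only: a site that sees frequent races on host
# reboots and prefers to reconcile power state manually might set:
#
#handle_virt_lifecycle_events = false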
10010
10011#
10012# Disable the server group policy check upcall in compute.
10013#
10014# In order to detect races with server group affinity policy, the
10015# compute
10016# service attempts to validate that the policy was not violated by the
10017# scheduler. It does this by making an upcall to the API database to
10018# list
10019# the instances in the server group for one that it is booting, which
10020# violates
10021# our api/cell isolation goals. Eventually this will be solved by
10022# proper affinity
10023# guarantees in the scheduler and placement service, but until then,
10024# this late
10025# check is needed to ensure proper affinity policy.
10026#
10027# Operators that desire api/cell isolation over this check should
10028# enable this flag, which will avoid making that upcall from compute.
10029#
10030# Related options:
10031#
10032# * [filter_scheduler]/track_instance_changes also relies on upcalls
10033# from the
10034# compute service to the scheduler service.
10035# (boolean value)
10036#disable_group_policy_check_upcall = false
10037
10038
10039[wsgi]
10040#
10041# Options under this group are used to configure WSGI (Web Server
10042# Gateway
10043# Interface). WSGI is used to serve API requests.
10044
10045#
10046# From nova.conf
10047#
10048
10049#
10050# This option represents a file name for the paste.deploy config for
10051# nova-api.
10052#
10053# Possible values:
10054#
10055# * A string representing file name for the paste.deploy config.
10056# (string value)
10057api_paste_config = /etc/nova/api-paste.ini
10058
10059# DEPRECATED:
10060# It represents a python format string that is used as the template to
10061# generate
10062# log lines. The following values can be formatted into it: client_ip,
10063# date_time, request_line, status_code, body_length, wall_seconds.
10064#
10065# This option is used for building custom request loglines when
10066# running
10067# nova-api under eventlet. If used under uwsgi or apache, this option
10068# has no effect.
10069#
10070# Possible values:
10071#
10072# * '%(client_ip)s "%(request_line)s" status: %(status_code)s'
10073# 'len: %(body_length)s time: %(wall_seconds).7f' (default)
10074# * Any formatted string formed by specific values.
10075# (string value)
10076# This option is deprecated for removal since 16.0.0.
10077# Its value may be silently ignored in the future.
10078# Reason:
10079# This option only works when running nova-api under eventlet, and
10080# encodes very eventlet specific pieces of information. Starting in
10081# Pike
10082# the preferred model for running nova-api is under uwsgi or apache
10083# mod_wsgi.
10084#wsgi_log_format = %(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f
10085
10086#
10087# This option specifies the HTTP header used to determine the protocol
10088# scheme
10089# for the original request, even if it was removed by a SSL
10090# terminating proxy.
10091#
10092# Possible values:
10093#
10094# * None (default) - the request scheme is not influenced by any HTTP
10095# headers
10096# * Valid HTTP header, like HTTP_X_FORWARDED_PROTO
10097#
10098# WARNING: Do not set this unless you know what you are doing.
10099#
10100# Make sure ALL of the following are true before setting this
10101# (assuming the
10102# values from the example above):
10103# * Your API is behind a proxy.
10104# * Your proxy strips the X-Forwarded-Proto header from all incoming
10105# requests.
10106# In other words, if end users include that header in their
10107# requests, the proxy
10108# will discard it.
10109# * Your proxy sets the X-Forwarded-Proto header and sends it to API,
10110# but only
10111# for requests that originally come in via HTTPS.
10112#
10113# If any of those are not true, you should keep this setting set to
10114# None.
10115#
10116# (string value)
10117#secure_proxy_ssl_header = <None>
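#
# Illustrative example only: behind a TLS-terminating proxy that sets
# and sanitizes X-Forwarded-Proto as described above, this would
# typically be:
#
#secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO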
10118
10119#
10120# This option allows setting path to the CA certificate file that
10121# should be used
10122# to verify connecting clients.
10123#
10124# Possible values:
10125#
10126# * String representing path to the CA certificate file.
10127#
10128# Related options:
10129#
10130# * enabled_ssl_apis
10131# (string value)
10132#ssl_ca_file = <None>
10133
10134#
10135# This option allows setting path to the SSL certificate of API
10136# server.
10137#
10138# Possible values:
10139#
10140# * String representing path to the SSL certificate.
10141#
10142# Related options:
10143#
10144# * enabled_ssl_apis
10145# (string value)
10146#ssl_cert_file = <None>
10147
10148#
10149# This option specifies the path to the file where SSL private key of
10150# API
10151# server is stored when SSL is in effect.
10152#
10153# Possible values:
10154#
10155# * String representing path to the SSL private key.
10156#
10157# Related options:
10158#
10159# * enabled_ssl_apis
10160# (string value)
10161#ssl_key_file = <None>
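#
# Illustrative example only (paths are assumptions): serving an SSL
# enabled API directly from nova might use:
#
#ssl_ca_file = /etc/nova/ssl/ca.pem
#ssl_cert_file = /etc/nova/ssl/cert.pem
#ssl_key_file = /etc/nova/ssl/key.pem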
10162
10163#
10164# This option sets the value of TCP_KEEPIDLE in seconds for each
10165# server socket.
10166# It specifies the duration of time to keep connection active. TCP
10167# generates a
10168# KEEPALIVE transmission for an application that requests to keep
10169# connection
10170# active. Not supported on OS X.
10171#
10172# Related options:
10173#
10174# * keep_alive
10175# (integer value)
10176# Minimum value: 0
10177#tcp_keepidle = 600
10178
10179#
10180# This option specifies the size of the pool of greenthreads used by
10181# wsgi.
10182# It is possible to limit the number of concurrent connections using
10183# this
10184# option.
10185# (integer value)
10186# Minimum value: 0
10187# Deprecated group/name - [DEFAULT]/wsgi_default_pool_size
10188#default_pool_size = 1000
10189
10190#
10191# This option specifies the maximum line size of message headers to be
10192# accepted.
10193# max_header_line may need to be increased when using large tokens
10194# (typically
10195# those generated by the Keystone v3 API with big service catalogs).
10196#
10197# Since TCP is a stream based protocol, in order to reuse a
10198# connection, the HTTP
10199# has to have a way to indicate the end of the previous response and
10200# beginning
10201# of the next. Hence, in a keep_alive case, all messages must have a
10202# self-defined message length.
10203# (integer value)
10204# Minimum value: 0
10205#max_header_line = 16384
10206
10207#
10208# This option allows using the same TCP connection to send and receive
10209# multiple
10210# HTTP requests/responses, as opposed to opening a new one for every
10211# single
10212# request/response pair. HTTP keep-alive indicates HTTP connection
10213# reuse.
10214#
10215# Possible values:
10216#
10217# * True : reuse HTTP connection.
10218# * False : closes the client socket connection explicitly.
10219#
10220# Related options:
10221#
10222# * tcp_keepidle
10223# (boolean value)
10224# Deprecated group/name - [DEFAULT]/wsgi_keep_alive
10225#keep_alive = true
10226
10227#
10228# This option specifies the timeout for client connections' socket
10229# operations.
10230# If an incoming connection is idle for this number of seconds it will
10231# be
10232# closed. It indicates timeout on individual read/writes on the socket
10233# connection. To wait forever set to 0.
10234# (integer value)
10235# Minimum value: 0
10236#client_socket_timeout = 900
10237
10238
10239[xenserver]
10240#
10241# XenServer options are used when the compute_driver is set to use
10242# XenServer (compute_driver=xenapi.XenAPIDriver).
10243#
10244# Must specify connection_url, connection_password and
10245# ovs_integration_bridge to
10246# use compute_driver=xenapi.XenAPIDriver.
10247
10248#
10249# From nova.conf
10250#
10251
10252#
10253# Number of seconds to wait for agent's reply to a request.
10254#
10255# Nova configures/performs certain administrative actions on a server
10256# with the
10257# help of an agent that's installed on the server. The communication
10258# between
10259# Nova and the agent is achieved via sharing messages, called records,
10260# over
10261# xenstore, a shared storage across all the domains on a Xenserver
10262# host.
10263# Operations performed by the agent on behalf of nova are: 'version',
10264# 'key_init', 'password', 'resetnetwork', 'inject_file', and
10265# 'agentupdate'.
10266#
10267# To perform one of the above operations, the xapi 'agent' plugin
10268# writes the
10269# command and its associated parameters to a certain location known to
10270# the domain
10271# and awaits response. On being notified of the message, the agent
10272# performs
10273# appropriate actions on the server and writes the result back to
10274# xenstore. This
10275# result is then read by the xapi 'agent' plugin to determine the
10276# success/failure
10277# of the operation.
10278#
10279# This config option determines how long the xapi 'agent' plugin shall
10280# wait to
10281# read the response off of xenstore for a given request/command. If
10282# the agent on
10283# the instance fails to write the result in this time period, the
10284# operation is
10285# considered to have timed out.
10286#
10287# Related options:
10288#
10289# * ``agent_version_timeout``
10290# * ``agent_resetnetwork_timeout``
10291#
10292# (integer value)
10293# Minimum value: 0
10294#agent_timeout = 30
10295
10296#
10297# Number of seconds to wait for agent's reply to version request.
10298#
10299# This indicates the amount of time xapi 'agent' plugin waits for the
10300# agent to
10301# respond to the 'version' request specifically. The generic timeout
10302# for agent
10303# communication ``agent_timeout`` is ignored in this case.
10304#
10305# During the build process the 'version' request is used to determine
10306# if the
10307# agent is available/operational to perform other requests such as
10308# 'resetnetwork', 'password', 'key_init' and 'inject_file'. If the
10309# 'version' call
# fails, the other configuration steps are skipped. So, this configuration
# option can also be interpreted as the time in which the agent is expected
# to be fully operational.
10314# (integer value)
10315# Minimum value: 0
10316#agent_version_timeout = 300
10317
10318#
10319# Number of seconds to wait for agent's reply to resetnetwork
10320# request.
10321#
10322# This indicates the amount of time xapi 'agent' plugin waits for the
10323# agent to
10324# respond to the 'resetnetwork' request specifically. The generic
10325# timeout for
10326# agent communication ``agent_timeout`` is ignored in this case.
10327# (integer value)
10328# Minimum value: 0
10329#agent_resetnetwork_timeout = 60
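# Illustrative example (values are only a sketch): a deployment with a slow
# in-guest agent could raise the generic timeout while keeping the
# operation-specific timeouts at their defaults.
#agent_timeout = 60
#agent_version_timeout = 300
#agent_resetnetwork_timeout = 60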
10330
10331#
10332# Path to locate guest agent on the server.
10333#
10334# Specifies the path in which the XenAPI guest agent should be
10335# located. If the
10336# agent is present, network configuration is not injected into the
10337# image.
10338#
10339# Related options:
10340#
10341# For this option to have an effect:
10342# * ``flat_injected`` should be set to ``True``
10343# * ``compute_driver`` should be set to ``xenapi.XenAPIDriver``
10344#
10345# (string value)
10346#agent_path = usr/sbin/xe-update-networking
10347
10348#
10349# Disables the use of XenAPI agent.
10350#
# This configuration option determines whether the use of the agent should be
# enabled or not, regardless of what image properties are present. Image
# properties have an effect only when this is set to ``False`` (i.e. when the
# agent is not disabled). Read the description of the config option
# ``use_agent_default`` for more information.
10358#
10359# Related options:
10360#
10361# * ``use_agent_default``
10362#
10363# (boolean value)
10364#disable_agent = false
10365
10366#
10367# Whether or not to use the agent by default when its usage is enabled
10368# but not
10369# indicated by the image.
10370#
10371# The use of XenAPI agent can be disabled altogether using the
10372# configuration
10373# option ``disable_agent``. However, if it is not disabled, the use of
10374# an agent
10375# can still be controlled by the image in use through one of its
10376# properties,
10377# ``xenapi_use_agent``. If this property is either not present or
10378# specified
10379# incorrectly on the image, the use of agent is determined by this
10380# configuration
10381# option.
10382#
10383# Note that if this configuration is set to ``True`` when the agent is
10384# not
10385# present, the boot times will increase significantly.
10386#
10387# Related options:
10388#
10389# * ``disable_agent``
10390#
10391# (boolean value)
10392#use_agent_default = false
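# Illustrative example: keep the agent usable globally, but only use it for
# images that explicitly set the 'xenapi_use_agent' property mentioned above.
#disable_agent = false
#use_agent_default = false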
10393
10394# Timeout in seconds for XenAPI login. (integer value)
10395# Minimum value: 0
10396#login_timeout = 10
10397
10398#
10399# Maximum number of concurrent XenAPI connections.
10400#
10401# In nova, multiple XenAPI requests can happen at a time.
10402# Configuring this option will parallelize access to the XenAPI
10403# session, which allows you to make concurrent XenAPI connections.
10404# (integer value)
10405# Minimum value: 1
10406#connection_concurrent = 5
10407
10408#
10409# Cache glance images locally.
10410#
10411# The value for this option must be chosen from the choices listed
10412# here. Configuring a value other than these will default to 'all'.
10413#
10414# Note: There is nothing that deletes these images.
10415#
10416# Possible values:
10417#
10418# * `all`: will cache all images.
10419# * `some`: will only cache images that have the
10420# image_property `cache_in_nova=True`.
10421# * `none`: turns off caching entirely.
10422# (string value)
10423# Possible values:
10424# all - <No description provided>
10425# some - <No description provided>
10426# none - <No description provided>
10427#cache_images = all
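# Illustrative example: cache only images carrying the image property
# 'cache_in_nova=True' described above.
#cache_images = some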
10428
10429#
10430# Compression level for images.
10431#
10432# By setting this option we can configure the gzip compression level.
# This option sets the GZIP environment variable before spawning tar -cz
10434# to force the compression level. It defaults to none, which means the
10435# GZIP environment variable is not set and the default (usually -6)
10436# is used.
10437#
10438# Possible values:
10439#
10440# * Range is 1-9, e.g., 9 for gzip -9, 9 being most
10441# compressed but most CPU intensive on dom0.
10442# * Any values out of this range will default to None.
10443# (integer value)
10444# Minimum value: 1
10445# Maximum value: 9
10446#image_compression_level = <None>
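# Illustrative example: a middle-of-the-range level that trades some dom0 CPU
# time for smaller images.
#image_compression_level = 6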
10447
10448# Default OS type used when uploading an image to glance (string
10449# value)
10450#default_os_type = linux
10451
10452# Time in secs to wait for a block device to be created (integer
10453# value)
10454# Minimum value: 1
10455#block_device_creation_timeout = 10
10456{%- if compute.block_device_creation_timeout is defined %}
10457block_device_creation_timeout = {{ compute.block_device_creation_timeout }}
10458{%- endif %}
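{#-
Illustrative pillar sketch (assumed nova:compute pillar layout, adjust to the
actual model in use) that would render the option above:

  nova:
    compute:
      block_device_creation_timeout: 30
-#}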
10459
10460#
10461# Maximum size in bytes of kernel or ramdisk images.
10462#
10463# Specifying the maximum size of kernel or ramdisk will avoid copying
10464# large files to dom0 and fill up /boot/guest.
10465# (integer value)
10466#max_kernel_ramdisk_size = 16777216
10467
10468#
10469# Filter for finding the SR to be used to install guest instances on.
10470#
10471# Possible values:
10472#
10473# * To use the Local Storage in default XenServer/XCP installations
10474# set this flag to other-config:i18n-key=local-storage.
10475# * To select an SR with a different matching criteria, you could
10476# set it to other-config:my_favorite_sr=true.
10477# * To fall back on the Default SR, as displayed by XenCenter,
10478# set this flag to: default-sr:true.
10479# (string value)
10480#sr_matching_filter = default-sr:true
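# Illustrative example: target the Local Storage SR of a default
# XenServer/XCP installation, per the first possibility listed above.
#sr_matching_filter = other-config:i18n-key=local-storage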
10481
10482#
10483# Whether to use sparse_copy for copying data on a resize down.
10484# (False will use standard dd). This speeds up resizes down
10485# considerably since large runs of zeros won't have to be rsynced.
10486# (boolean value)
10487#sparse_copy = true
10488
10489#
10490# Maximum number of retries to unplug VBD.
10491# If set to 0, should try once, no retries.
10492# (integer value)
10493# Minimum value: 0
10494#num_vbd_unplug_retries = 10
10495
10496#
10497# Name of network to use for booting iPXE ISOs.
10498#
10499# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
10500# This feature gives a means to roll your own image.
10501#
10502# By default this option is not set. Enable this option to
10503# boot an iPXE ISO.
10504#
10505# Related Options:
10506#
10507# * `ipxe_boot_menu_url`
10508# * `ipxe_mkisofs_cmd`
10509# (string value)
10510#ipxe_network_name = <None>
10511
10512#
10513# URL to the iPXE boot menu.
10514#
10515# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
10516# This feature gives a means to roll your own image.
10517#
10518# By default this option is not set. Enable this option to
10519# boot an iPXE ISO.
10520#
10521# Related Options:
10522#
10523# * `ipxe_network_name`
10524# * `ipxe_mkisofs_cmd`
10525# (string value)
10526#ipxe_boot_menu_url = <None>
10527
10528#
10529# Name and optionally path of the tool used for ISO image creation.
10530#
10531# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
10532# This feature gives a means to roll your own image.
10533#
# Note: By default `mkisofs` is not present in the Dom0, so the package must
# either be added to Dom0 manually or the `mkisofs` binary must be included
# in the image itself.
10537#
10538# Related Options:
10539#
10540# * `ipxe_network_name`
10541# * `ipxe_boot_menu_url`
10542# (string value)
10543#ipxe_mkisofs_cmd = mkisofs
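# Illustrative example (hypothetical network name and URL): the three iPXE
# options are usually set together when iPXE ISO booting is wanted.
#ipxe_network_name = ipxe-boot
#ipxe_boot_menu_url = http://boot.example.com/menu.ipxe
#ipxe_mkisofs_cmd = mkisofs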
10544
10545#
10546# URL for connection to XenServer/Xen Cloud Platform. A special value
10547# of unix://local can be used to connect to the local unix socket.
10548#
10549# Possible values:
10550#
10551# * Any string that represents a URL. The connection_url is
10552# generally the management network IP address of the XenServer.
10553# * This option must be set if you chose the XenServer driver.
10554# (string value)
10555#connection_url = <None>
10556
10557# Username for connection to XenServer/Xen Cloud Platform (string
10558# value)
10559#connection_username = root
10560
10561# Password for connection to XenServer/Xen Cloud Platform (string
10562# value)
10563#connection_password = <None>
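# Illustrative example (hypothetical address, credentials and bridge name):
# the minimal settings called out at the top of this section for
# compute_driver=xenapi.XenAPIDriver; ovs_integration_bridge is described
# further below.
#connection_url = https://192.0.2.10
#connection_username = root
#connection_password = xenapi-secret
#ovs_integration_bridge = br-int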
10564
10565#
10566# The interval used for polling of coalescing vhds.
10567#
# This is the interval after which the VHD coalesce task is performed,
# repeated until it reaches the maximum number of attempts set by
# vhd_coalesce_max_attempts.
10571#
10572# Related options:
10573#
10574# * `vhd_coalesce_max_attempts`
10575# (floating point value)
10576# Minimum value: 0
10577#vhd_coalesce_poll_interval = 5.0
10578
10579#
10580# Ensure compute service is running on host XenAPI connects to.
10581# This option must be set to false if the 'independent_compute'
10582# option is set to true.
10583#
10584# Possible values:
10585#
10586# * Setting this option to true will make sure that compute service
10587# is running on the same host that is specified by connection_url.
# * Setting this option to false skips the check.
10589#
10590# Related options:
10591#
10592# * `independent_compute`
10593# (boolean value)
10594#check_host = true
10595
10596#
10597# Max number of times to poll for VHD to coalesce.
10598#
10599# This option determines the maximum number of attempts that can be
10600# made for coalescing the VHD before giving up.
10601#
# Related options:
10603#
10604# * `vhd_coalesce_poll_interval`
10605# (integer value)
10606# Minimum value: 0
10607#vhd_coalesce_max_attempts = 20
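# Illustrative example: poll every 10 seconds for up to 30 attempts before
# giving up on VHD coalescing.
#vhd_coalesce_poll_interval = 10.0
#vhd_coalesce_max_attempts = 30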
10608
10609# Base path to the storage repository on the XenServer host. (string
10610# value)
10611#sr_base_path = /var/run/sr-mount
10612
10613#
10614# The iSCSI Target Host.
10615#
10616# This option represents the hostname or ip of the iSCSI Target.
10617# If the target host is not present in the connection information from
10618# the volume provider then the value from this option is taken.
10619#
10620# Possible values:
10621#
10622# * Any string that represents hostname/ip of Target.
10623# (unknown value)
10624#target_host = <None>
10625
10626#
10627# The iSCSI Target Port.
10628#
10629# This option represents the port of the iSCSI Target. If the
10630# target port is not present in the connection information from the
10631# volume provider then the value from this option is taken.
10632# (port value)
10633# Minimum value: 0
10634# Maximum value: 65535
10635#target_port = 3260
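# Illustrative example (hypothetical target address): these values are only
# used when the volume provider does not supply the iSCSI target itself.
#target_host = 192.0.2.20
#target_port = 3260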
10636
10637#
10638# Used to prevent attempts to attach VBDs locally, so Nova can
10639# be run in a VM on a different host.
10640#
10641# Related options:
10642#
10643# * ``CONF.flat_injected`` (Must be False)
10644# * ``CONF.xenserver.check_host`` (Must be False)
10645# * ``CONF.default_ephemeral_format`` (Must be unset or 'ext3')
10646# * Joining host aggregates (will error if attempted)
10647# * Swap disks for Windows VMs (will error if attempted)
10648# * Nova-based auto_configure_disk (will error if attempted)
10649# (boolean value)
10650#independent_compute = false
10651
10652#
10653# Wait time for instances to go to running state.
10654#
10655# Provide an integer value representing time in seconds to set the
10656# wait time for an instance to go to running state.
10657#
10658# When a request to create an instance is received by nova-api and
10659# communicated to nova-compute, the creation of the instance occurs
10660# through interaction with Xen via XenAPI in the compute node. Once
10661# the node on which the instance(s) are to be launched is decided by
# nova-scheduler and the launch is triggered, a certain amount of wait
10663# time is involved until the instance(s) can become available and
10664# 'running'. This wait time is defined by running_timeout. If the
10665# instances do not go to running state within this specified wait
10666# time, the launch expires and the instance(s) are set to 'error'
10667# state.
10668# (integer value)
10669# Minimum value: 0
10670#running_timeout = 60
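# Illustrative example: give slow-booting instances up to five minutes to
# reach the running state before they are put into the error state.
#running_timeout = 300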
10671
10672# DEPRECATED:
10673# The XenAPI VIF driver using XenServer Network APIs.
10674#
10675# Provide a string value representing the VIF XenAPI vif driver to use
10676# for
10677# plugging virtual network interfaces.
10678#
10679# Xen configuration uses bridging within the backend domain to allow
10680# all VMs to appear on the network as individual hosts. Bridge
10681# interfaces are used to create a XenServer VLAN network in which
10682# the VIFs for the VM instances are plugged. If no VIF bridge driver
10683# is plugged, the bridge is not made available. This configuration
10684# option takes in a value for the VIF driver.
10685#
10686# Possible values:
10687#
10688# * nova.virt.xenapi.vif.XenAPIOpenVswitchDriver (default)
10689# * nova.virt.xenapi.vif.XenAPIBridgeDriver (deprecated)
10690#
10691# Related options:
10692#
10693# * ``vlan_interface``
10694# * ``ovs_integration_bridge``
10695# (string value)
10696# This option is deprecated for removal since 15.0.0.
10697# Its value may be silently ignored in the future.
10698# Reason:
10699# There are only two in-tree vif drivers for XenServer.
10700# XenAPIBridgeDriver is for
10701# nova-network which is deprecated and XenAPIOpenVswitchDriver is for
10702# Neutron
10703# which is the default configuration for Nova since the 15.0.0 Ocata
10704# release. In
10705# the future the "use_neutron" configuration option will be used to
10706# determine
10707# which vif driver to use.
10708#vif_driver = nova.virt.xenapi.vif.XenAPIOpenVswitchDriver
10709
10710#
10711# Dom0 plugin driver used to handle image uploads.
10712#
10713# Provide a string value representing a plugin driver required to
10714# handle the image uploading to GlanceStore.
10715#
# Images and snapshots from XenServer need to be uploaded to the data
# store for use. image_upload_handler takes in a value for the Dom0
# plugin driver. This driver is then called to upload images to the
# GlanceStore.
10720# (string value)
10721#image_upload_handler = nova.virt.xenapi.image.glance.GlanceStore
10722
10723#
10724# Number of seconds to wait for SR to settle if the VDI
10725# does not exist when first introduced.
10726#
# Some SRs, particularly iSCSI connections, are slow to see the VDIs
# right after they are introduced. Setting this option to a
# time interval will make the SR wait for that time period
# before raising a "VDI not found" exception.
10731# (integer value)
10732# Minimum value: 0
10733#introduce_vdi_retry_wait = 20
10734
10735#
10736# The name of the integration Bridge that is used with xenapi
10737# when connecting with Open vSwitch.
10738#
10739# Note: The value of this config option is dependent on the
10740# environment, therefore this configuration value must be set
10741# accordingly if you are using XenAPI.
10742#
10743# Possible values:
10744#
10745# * Any string that represents a bridge name.
10746# (string value)
10747#ovs_integration_bridge = <None>
10748
10749#
# When adding a new host to a pool, this will append a --force flag to the
# command, forcing hosts to join the pool even if they have different CPUs.
#
# Since XenServer version 5.6 it is possible to create a pool of hosts that
# have different CPU capabilities. To accommodate CPU differences, XenServer
# limited the features it uses to determine CPU compatibility to only the
# ones exposed by the CPU, and support for CPU masking was added. Despite
# this effort to level out differences between CPUs, it is still possible
# that adding a new host will fail, thus the option to force the join was
# introduced.
10766# (boolean value)
10767#use_join_force = true
10768
10769#
10770# Publicly visible name for this console host.
10771#
10772# Possible values:
10773#
10774# * Current hostname (default) or any string representing hostname.
10775# (string value)
10776#console_public_hostname = <current_hostname>
10777
10778
10779[xvp]
10780#
10781# Configuration options for XVP.
10782#
10783# xvp (Xen VNC Proxy) is a proxy server providing password-protected
10784# VNC-based
10785# access to the consoles of virtual machines hosted on Citrix
10786# XenServer.
10787
10788#
10789# From nova.conf
10790#
10791
10792# XVP conf template (string value)
10793#console_xvp_conf_template = $pybasedir/nova/console/xvp.conf.template
10794
10795# Generated XVP conf file (string value)
10796#console_xvp_conf = /etc/xvp.conf
10797
10798# XVP master process pid file (string value)
10799#console_xvp_pid = /var/run/xvp.pid
10800
10801# XVP log file (string value)
10802#console_xvp_log = /var/log/xvp.log
10803
10804# Port for XVP to multiplex VNC connections on (port value)
10805# Minimum value: 0
10806# Maximum value: 65535
10807#console_xvp_multiplex_port = 5900
10808
[matchmaker_redis]
{#- include "oslo_templates/oslo/_matchmaker_redis.conf" #}

[oslo_messaging_notifications]
{%- set _data = compute.notification %}
{%- include "oslo_templates/files/queens/oslo/messaging/_notifications.conf" %}

{%- if compute.message_queue is defined %}
{%- set _data = compute.message_queue %}
{%- if _data.engine == 'rabbitmq' %}
  {%- set messaging_engine = 'rabbit' %}
{%- else %}
  {%- set messaging_engine = _data.engine %}
{%- endif %}
[oslo_messaging_{{ messaging_engine }}]
{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': compute.cacert_file}) %}{% endif %}
{%- include "oslo_templates/files/queens/oslo/messaging/_" + messaging_engine + ".conf" %}
{%- endif %}
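{#-
Illustrative pillar sketch (assumed nova:compute pillar layout): setting
message_queue.engine to 'rabbitmq' makes the block above render an
[oslo_messaging_rabbit] section; the connection parameters themselves are
consumed by the included oslo_templates file.

  nova:
    compute:
      message_queue:
        engine: rabbitmq
-#}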

[oslo_policy]
{%- if compute.policy is defined %}
{%- set _data = compute.policy %}
{%- include "oslo_templates/files/queens/oslo/_policy.conf" %}
{%- endif %}

[database]
{%- set _data = compute.database %}
{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': compute.cacert_file}) %}{% endif %}
{%- include "oslo_templates/files/queens/oslo/_database.conf" %}

[oslo_middleware]
{%- set _data = compute %}
{%- include "oslo_templates/files/queens/oslo/_middleware.conf" %}

[keystone_authtoken]
{%- set _data = compute.identity %}
{%- if 'cacert_file' not in _data.keys() %}{% do _data.update({'cacert_file': compute.cacert_file}) %}{% endif %}
{%- set auth_type = _data.get('auth_type', 'password') %}
{%- if compute.get('cache',{}).members is defined and 'cache' not in _data.keys() %}
{% do _data.update({'cache': compute.cache}) %}
{%- endif %}
{%- include "oslo_templates/files/queens/keystonemiddleware/_auth_token.conf" %}
{%- include "oslo_templates/files/queens/keystoneauth/_type_" + auth_type + ".conf" %}
10852