1{%- from "nova/map.jinja" import controller,compute_driver_mapping with context %}
2[DEFAULT]
3
4#
5# From nova.conf
6#
7compute_manager=nova.controller.manager.ComputeManager
8network_device_mtu=65000
9use_neutron = True
10security_group_api=neutron
11image_service=nova.image.glance.GlanceImageService
12
13#
14# Availability zone for internal services.
15#
16# This option determines the availability zone for the various
17# internal nova
18# services, such as 'nova-scheduler', 'nova-conductor', etc.
19#
20# Possible values:
21#
22# * Any string representing an existing availability zone name.
23# (string value)
24#internal_service_availability_zone = internal
25
26#
27# Default availability zone for compute services.
28#
29# This option determines the default availability zone for 'nova-
30# compute'
31# services, which will be used if the service(s) do not belong to
32# aggregates with
33# availability zone metadata.
34#
35# Possible values:
36#
37# * Any string representing an existing availability zone name.
38# (string value)
39#default_availability_zone = nova
40
41#
42# Default availability zone for instances.
43#
44# This option determines the default availability zone for instances,
45# which will
46# be used when a user does not specify one when creating an instance.
47# The
48# instance(s) will be bound to this availability zone for their
49# lifetime.
50#
51# Possible values:
52#
53# * Any string representing an existing availability zone name.
54# * None, which means that the instance can move from one availability
55# zone to
56# another during its lifetime if it is moved from one compute node
57# to another.
58# (string value)
59#default_schedule_zone = <None>
60
61# Length of generated instance admin passwords. (integer value)
62# Minimum value: 0
63#password_length = 12
64
65#
66# Time period to generate instance usages for. It is possible to
67# define optional
68# offset to given period by appending @ character followed by a number
69# defining
70# offset.
71#
72# Possible values:
73#
74# * period, example: ``hour``, ``day``, ``month`` or ``year``
75# * period with offset, example: ``month@15`` will result in monthly
76# audits
77# starting on 15th day of month.
78# (string value)
79#instance_usage_audit_period = month
80{% if pillar.ceilometer is defined %}
81instance_usage_audit = True
82instance_usage_audit_period = hour
83{%- endif %}
84
85#
86# Start and use a daemon that can run the commands that need to be run
87# with
88# root privileges. This option is usually enabled on nodes that run
89# nova compute
90# processes.
91# (boolean value)
92#use_rootwrap_daemon = false
93
94#
95# Path to the rootwrap configuration file.
96#
97# Goal of the root wrapper is to allow a service-specific unprivileged
98# user to
99# run a number of actions as the root user in the safest manner
100# possible.
101# The configuration file used here must match the one defined in the
102# sudoers
103# entry.
104# (string value)
105rootwrap_config = /etc/nova/rootwrap.conf
106
107# Explicitly specify the temporary working directory. (string value)
108#tempdir = <None>
109
110# DEPRECATED:
111# Determine if monkey patching should be applied.
112#
113# Related options:
114#
115# * ``monkey_patch_modules``: This must have values set for this
116# option to
117# have any effect
118# (boolean value)
119# This option is deprecated for removal since 17.0.0.
120# Its value may be silently ignored in the future.
121# Reason:
122# Monkey patching nova is not tested, not supported, and is a barrier
123# for interoperability.
124#monkey_patch = false
125
126# DEPRECATED:
127# List of modules/decorators to monkey patch.
128#
129# This option allows you to patch a decorator for all functions in
130# specified
131# modules.
132#
133# Possible values:
134#
135# * nova.compute.api:nova.notifications.notify_decorator
136# * [...]
137#
138# Related options:
139#
140# * ``monkey_patch``: This must be set to ``True`` for this option to
141# have any effect
142# (list value)
143# This option is deprecated for removal since 17.0.0.
144# Its value may be silently ignored in the future.
145# Reason:
146# Monkey patching nova is not tested, not supported, and is a barrier
147# for interoperability.
148#monkey_patch_modules = nova.compute.api:nova.notifications.notify_decorator
149
150#
151# Defines which driver to use for controlling virtualization.
152#
153# Possible values:
154#
155# * ``libvirt.LibvirtDriver``
156# * ``xenapi.XenAPIDriver``
157# * ``fake.FakeDriver``
158# * ``ironic.IronicDriver``
159# * ``vmwareapi.VMwareVCDriver``
160# * ``hyperv.HyperVDriver``
161# * ``powervm.PowerVMDriver``
162# (string value)
163#compute_driver = <None>
164compute_driver = {{ controller.get('compute_driver', 'libvirt.LibvirtDriver') }}
165
166#
167# Allow destination machine to match source for resize. Useful when
168# testing in single-host environments. By default it is not allowed
169# to resize to the same host. Setting this option to true will add
170# the same host to the destination options. Also set to true
171# if you allow the ServerGroupAffinityFilter and need to resize.
172# (boolean value)
173#allow_resize_to_same_host = false
174allow_resize_to_same_host = true
175
176#
177# Image properties that should not be inherited from the instance
178# when taking a snapshot.
179#
180# This option gives an opportunity to select which image-properties
181# should not be inherited by newly created snapshots.
182#
183# Possible values:
184#
185# * A comma-separated list whose item is an image property. Usually
186# only
187# the image properties that are only needed by base images can be
188# included
189# here, since the snapshots that are created from the base images
190# don't
191# need them.
192# * Default list: cache_in_nova, bittorrent,
193# img_signature_hash_method,
194# img_signature, img_signature_key_type,
195# img_signature_certificate_uuid
196#
197# (list value)
198#non_inheritable_image_properties = cache_in_nova,bittorrent,img_signature_hash_method,img_signature,img_signature_key_type,img_signature_certificate_uuid
199
200# DEPRECATED:
201# When creating multiple instances with a single request using the
202# os-multiple-create API extension, this template will be used to
203# build
204# the display name for each instance. The benefit is that the
205# instances
206# end up with different hostnames. Example display names when creating
207# two VMs: name-1, name-2.
208#
209# Possible values:
210#
211# * Valid keys for the template are: name, uuid, count.
212# (string value)
213# This option is deprecated for removal since 15.0.0.
214# Its value may be silently ignored in the future.
215# Reason:
216# This config changes API behaviour. All changes in API behaviour
217# should be
218# discoverable.
219#multi_instance_display_name_template = %(name)s-%(count)d
220
221#
222# Maximum number of devices that will result in a local image being
223# created on the hypervisor node.
224#
225# A negative number means unlimited. Setting max_local_block_devices
226# to 0 means that any request that attempts to create a local disk
227# will fail. This option is meant to limit the number of local discs
228# (so root local disc that is the result of --image being used, and
229# any other ephemeral and swap disks). 0 does not mean that images
230# will be automatically converted to volumes and boot instances from
231# volumes - it just means that all requests that attempt to create a
232# local disk will fail.
233#
234# Possible values:
235#
236# * 0: Creating a local disk is not allowed.
237# * Negative number: Allows unlimited number of local discs.
238# * Positive number: Allows only this many local discs.
239# (Default value is 3).
240# (integer value)
241#max_local_block_devices = 3
242
243#
244# A comma-separated list of monitors that can be used for getting
245# compute metrics. You can use the alias/name from the setuptools
246# entry points for nova.compute.monitors.* namespaces. If no
247# namespace is supplied, the "cpu." namespace is assumed for
248# backwards-compatibility.
249#
250# NOTE: Only one monitor per namespace (For example: cpu) can be
251# loaded at
252# a time.
253#
254# Possible values:
255#
256# * An empty list will disable the feature (Default).
257# * An example value that would enable both the CPU and NUMA memory
258# bandwidth monitors that use the virt driver variant:
259#
260# compute_monitors = cpu.virt_driver, numa_mem_bw.virt_driver
261# (list value)
262#compute_monitors =
263
264#
265# The default format an ephemeral_volume will be formatted with on
266# creation.
267#
268# Possible values:
269#
270# * ``ext2``
271# * ``ext3``
272# * ``ext4``
273# * ``xfs``
274# * ``ntfs`` (only for Windows guests)
275# (string value)
276#default_ephemeral_format = <None>
277
278#
279# Determine if instance should boot or fail on VIF plugging timeout.
280#
281# Nova sends a port update to Neutron after an instance has been
282# scheduled,
283# providing Neutron with the necessary information to finish setup of
284# the port.
285# Once completed, Neutron notifies Nova that it has finished setting
286# up the
287# port, at which point Nova resumes the boot of the instance since
288# network
289# connectivity is now supposed to be present. A timeout will occur if
290# the reply
291# is not received after a given interval.
292#
293# This option determines what Nova does when the VIF plugging timeout
294# event
295# happens. When enabled, the instance will error out. When disabled,
296# the
297# instance will continue to boot on the assumption that the port is
298# ready.
299#
300# Possible values:
301#
302# * True: Instances should fail after VIF plugging timeout
303# * False: Instances should continue booting after VIF plugging
304# timeout
305# (boolean value)
306vif_plugging_is_fatal = {{ controller.get('vif_plugging_is_fatal', 'true') }}
307
308#
309# Timeout for Neutron VIF plugging event message arrival.
310#
311# Number of seconds to wait for Neutron vif plugging events to
312# arrive before continuing or failing (see 'vif_plugging_is_fatal').
313#
314# Related options:
315#
316# * vif_plugging_is_fatal - If ``vif_plugging_timeout`` is set to zero
317# and
318# ``vif_plugging_is_fatal`` is False, events should not be expected
319# to
320# arrive at all.
321# (integer value)
322# Minimum value: 0
323vif_plugging_timeout = {{ controller.get('vif_plugging_timeout', '300') }}
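# Illustrative combination (not set by this template): per the related-options
# note above, to let instances keep booting even if Neutron never sends the
# plug event, the two options could be set together like this.
#vif_plugging_is_fatal = false
#vif_plugging_timeout = 0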
324
325# Path to '/etc/network/interfaces' template.
326#
327# The path to a template file for the '/etc/network/interfaces'-style
328# file, which
329# will be populated by nova and subsequently used by cloudinit. This
330# provides a
331# method to configure network connectivity in environments without a
332# DHCP server.
333#
334# The template will be rendered using Jinja2 template engine, and
335# receive a
336# top-level key called ``interfaces``. This key will contain a list of
337# dictionaries, one for each interface.
338#
339# Refer to the cloudinit documentation for more information:
340#
341# https://cloudinit.readthedocs.io/en/latest/topics/datasources.html
342#
343# Possible values:
344#
345# * A path to a Jinja2-formatted template for a Debian
346# '/etc/network/interfaces'
347# file. This applies even if using a non Debian-derived guest.
348#
349# Related options:
350#
351# * ``flat_inject``: This must be set to ``True`` to ensure nova
352# embeds network
353# configuration information in the metadata provided through the
354# config drive.
355# (string value)
356#injected_network_template = $pybasedir/nova/virt/interfaces.template
357
358#
359# The image preallocation mode to use.
360#
361# Image preallocation allows storage for instance images to be
362# allocated up front
363# when the instance is initially provisioned. This ensures immediate
364# feedback is
365# given if enough space isn't available. In addition, it should
366# significantly
367# improve performance on writes to new blocks and may even improve I/O
368# performance to prewritten blocks due to reduced fragmentation.
369#
370# Possible values:
371#
372# * "none" => no storage provisioning is done up front
373# * "space" => storage is fully allocated at instance start
374# (string value)
375# Possible values:
376# none - <No description provided>
377# space - <No description provided>
378#preallocate_images = none
379
380#
381# Enable use of copy-on-write (cow) images.
382#
383# QEMU/KVM allow the use of qcow2 as backing files. By disabling this,
384# backing files will not be used.
385# (boolean value)
386#use_cow_images = true
387
388#
389# Force conversion of backing images to raw format.
390#
391# Possible values:
392#
393# * True: Backing image files will be converted to raw image format
394# * False: Backing image files will not be converted
395#
396# Related options:
397#
398# * ``compute_driver``: Only the libvirt driver uses this option.
399# (boolean value)
400#force_raw_images = true
401
402#
403# Name of the mkfs commands for ephemeral device.
404#
405# The format is <os_type>=<mkfs command>
406# (multi valued)
407#virt_mkfs =
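# Illustrative example (not set by this template; assumes the conventional
# %(fs_label)s and %(target)s substitutions used by the libvirt driver):
#virt_mkfs = linux=mkfs.ext4 -L %(fs_label)s -F %(target)s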
408
409#
410# Enable resizing of filesystems via a block device.
411#
412# If enabled, attempt to resize the filesystem by accessing the image
413# over a
414# block device. This is done by the host and may not be necessary if
415# the image
416# contains a recent version of cloud-init. Possible mechanisms require
417# the nbd
418# driver (for qcow and raw), or loop (for raw).
419# (boolean value)
420#resize_fs_using_block_device = false
421
422# Amount of time, in seconds, to wait for NBD device start up.
423# (integer value)
424# Minimum value: 0
425#timeout_nbd = 10
426{%- if controller.timeout_nbd is defined %}
427timeout_nbd = {{ controller.timeout_nbd }}
428{%- endif %}
429
430#
431# Location of cached images.
432#
433# This is NOT the full path - just a folder name relative to
434# '$instances_path'.
435# For per-compute-host cached images, set to '_base_$my_ip'
436# (string value)
437#image_cache_subdirectory_name = _base
438
439# Should unused base images be removed? (boolean value)
440#remove_unused_base_images = true
441
442#
443# Unused unresized base images younger than this will not be removed.
444# (integer value)
445remove_unused_original_minimum_age_seconds = 86400
446
447#
448# Generic property to specify the pointer type.
449#
450# Input devices allow interaction with a graphical framebuffer. For
451# example to provide a graphic tablet for absolute cursor movement.
452#
453# If set, the 'hw_pointer_model' image property takes precedence over
454# this configuration option.
455#
456# Possible values:
457#
458# * None: Uses default behavior provided by drivers (mouse on PS2 for
459# libvirt x86)
460# * ps2mouse: Uses relative movement. Mouse connected by PS2
461# * usbtablet: Uses absolute movement. Tablet connect by USB
462#
463# Related options:
464#
465# * usbtablet must be configured with VNC enabled or SPICE enabled and
466# SPICE
467# agent disabled. When used with libvirt the instance mode should be
468# configured as HVM.
469# (string value)
470# Possible values:
471# <None> - <No description provided>
472# ps2mouse - <No description provided>
473# usbtablet - <No description provided>
474#pointer_model = usbtablet
475
476#
477# Defines which physical CPUs (pCPUs) can be used by instance
478# virtual CPUs (vCPUs).
479#
480# Possible values:
481#
482# * A comma-separated list of physical CPU numbers that virtual CPUs
483# can be
484# allocated to by default. Each element should be either a single
485# CPU number,
486# a range of CPU numbers, or a caret followed by a CPU number to be
487# excluded from a previous range. For example:
488#
489# vcpu_pin_set = "4-12,^8,15"
490# (string value)
491#vcpu_pin_set = <None>
492
493#
494# Number of huge/large memory pages to reserved per NUMA host cell.
495#
496# Possible values:
497#
498# * A list of valid key=value which reflect NUMA node ID, page size
499# (Default unit is KiB) and number of pages to be reserved.
500#
501# reserved_huge_pages = node:0,size:2048,count:64
502# reserved_huge_pages = node:1,size:1GB,count:1
503#
504# In this example we are reserving 64 pages of 2MiB on NUMA node 0
505# and 1 page of 1GiB on NUMA node 1.
506# (dict value)
507#reserved_huge_pages = <None>
508
509#
510# Amount of disk resources in MB to make them always available to
511# host. The
512# disk usage gets reported back to the scheduler from nova-compute
513# running
514# on the compute nodes. To prevent the disk resources from being
515# considered
516# as available, this option can be used to reserve disk space for that
517# host.
518#
519# Possible values:
520#
521# * Any positive integer representing amount of disk in MB to reserve
522# for the host.
523# (integer value)
524# Minimum value: 0
525#reserved_host_disk_mb = 0
526
527#
528# Amount of memory in MB to reserve for the host so that it is always
529# available
530# to host processes. The host resources usage is reported back to the
531# scheduler
532# continuously from nova-compute running on the compute node. To
533# prevent the host
534# memory from being considered as available, this option is used to
535# reserve
536# memory for the host.
537#
538# Possible values:
539#
540# * Any positive integer representing amount of memory in MB to
541# reserve
542# for the host.
543# (integer value)
544# Minimum value: 0
545#reserved_host_memory_mb = 512
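# Illustrative override (not set by this template): reserve 4 GiB (4096 MB)
# for host processes on a densely packed hypervisor.
#reserved_host_memory_mb = 4096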
546
547#
548# Number of physical CPUs to reserve for the host. The host resources
549# usage is
550# reported back to the scheduler continuously from nova-compute
551# running on the
552# compute node. To prevent the host CPU from being considered as
553# available,
554# this option is used to reserve random pCPU(s) for the host.
555#
556# Possible values:
557#
558# * Any positive integer representing number of physical CPUs to
559# reserve
560# for the host.
561# (integer value)
562# Minimum value: 0
563#reserved_host_cpus = 0
564
565#
566# This option helps you specify virtual CPU to physical CPU allocation
567# ratio.
568#
569# From Ocata (15.0.0) this is used to influence the hosts selected by
570# the Placement API. Note that when Placement is used, the CoreFilter
571# is redundant, because the Placement API will have already filtered
572# out hosts that would have failed the CoreFilter.
573#
574# This configuration specifies ratio for CoreFilter which can be set
575# per compute node. For AggregateCoreFilter, it will fall back to this
576# configuration value if no per-aggregate setting is found.
577#
578# NOTE: This can be set per-compute, or if set to 0.0, the value
579# set on the scheduler node(s) or compute node(s) will be used
580# and defaulted to 16.0.
581#
582# NOTE: As of the 16.0.0 Pike release, this configuration option is
583# ignored
584# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
585#
586# Possible values:
587#
588# * Any valid positive integer or float value
589# (floating point value)
590# Minimum value: 0
591#cpu_allocation_ratio = 0.0
592{%- if controller.cpu_allocation_ratio is defined %}
593cpu_allocation_ratio = {{ controller.cpu_allocation_ratio }}
594{%- else %}
595#cpu_allocation_ratio=0.0
596{%- endif %}
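# The block above reads the ratio from the ``controller`` map imported at the
# top of this template. A minimal pillar sketch (assumed structure for this
# formula) that would render an explicit value:
#   nova:
#     controller:
#       cpu_allocation_ratio: 8.0
# The ram_allocation_ratio and heal_instance_info_cache_interval blocks below
# follow the same pattern.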
597
598#
599# This option helps you specify virtual RAM to physical RAM
600# allocation ratio.
601#
602# From Ocata (15.0.0) this is used to influence the hosts selected by
603# the Placement API. Note that when Placement is used, the RamFilter
604# is redundant, because the Placement API will have already filtered
605# out hosts that would have failed the RamFilter.
606#
607# This configuration specifies ratio for RamFilter which can be set
608# per compute node. For AggregateRamFilter, it will fall back to this
609# configuration value if no per-aggregate setting is found.
610#
611# NOTE: This can be set per-compute, or if set to 0.0, the value
612# set on the scheduler node(s) or compute node(s) will be used and
613# defaulted to 1.5.
614#
615# NOTE: As of the 16.0.0 Pike release, this configuration option is
616# ignored
617# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
618#
619# Possible values:
620#
621# * Any valid positive integer or float value
622# (floating point value)
623# Minimum value: 0
624#ram_allocation_ratio = 0.0
625{%- if controller.ram_allocation_ratio is defined %}
626ram_allocation_ratio = {{ controller.ram_allocation_ratio }}
627{%- else %}
628#ram_allocation_ratio=0.0
629{%- endif %}
630
631#
632# This option helps you specify virtual disk to physical disk
633# allocation ratio.
634#
635# From Ocata (15.0.0) this is used to influence the hosts selected by
636# the Placement API. Note that when Placement is used, the DiskFilter
637# is redundant, because the Placement API will have already filtered
638# out hosts that would have failed the DiskFilter.
639#
640# A ratio greater than 1.0 will result in over-subscription of the
641# available physical disk, which can be useful for more
642# efficiently packing instances created with images that do not
643# use the entire virtual disk, such as sparse or compressed
644# images. It can be set to a value between 0.0 and 1.0 in order
645# to preserve a percentage of the disk for uses other than
646# instances.
647#
648# NOTE: This can be set per-compute, or if set to 0.0, the value
649# set on the scheduler node(s) or compute node(s) will be used and
650# defaulted to 1.0.
651#
652# NOTE: As of the 16.0.0 Pike release, this configuration option is
653# ignored
654# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
655#
656# Possible values:
657#
658# * Any valid positive integer or float value
659# (floating point value)
660# Minimum value: 0
661#disk_allocation_ratio = 0.0
662
663#
664# Console proxy host to be used to connect to instances on this host.
665# It is the
666# publicly visible name for the console host.
667#
668# Possible values:
669#
670# * Current hostname (default) or any string representing hostname.
671# (string value)
672#console_host = <current_hostname>
673
674#
675# Name of the network to be used to set access IPs for instances. If
676# there are
677# multiple IPs to choose from, an arbitrary one will be chosen.
678#
679# Possible values:
680#
681# * None (default)
682# * Any string representing network name.
683# (string value)
684#default_access_ip_network_name = <None>
685
686#
687# Whether to batch up the application of IPTables rules during a host
688# restart
689# and apply all at the end of the init phase.
690# (boolean value)
691#defer_iptables_apply = false
692
693#
694# Specifies where instances are stored on the hypervisor's disk.
695# It can point to locally attached storage or a directory on NFS.
696#
697# Possible values:
698#
699# * $state_path/instances where state_path is a config option that
700# specifies
701# the top-level directory for maintaining nova's state. (default) or
702# Any string representing directory path.
703# (string value)
704#instances_path =
705
706#
707# This option enables periodic compute.instance.exists notifications.
708# Each
709# compute node must be configured to generate system usage data. These
710# notifications are consumed by OpenStack Telemetry service.
711# (boolean value)
712#instance_usage_audit = false
713{% if controller.instance_usage_audit is defined %}
714instance_usage_audit = {{ controller.instance_usage_audit }}
715{%- endif %}
716
717#
718# Maximum number of 1 second retries in live_migration. It specifies
719# number
720# of retries to iptables when it complains. It happens when a user
721# continuously
722# sends live-migration requests to the same host, leading to
723# concurrent requests
724# to iptables.
725#
726# Possible values:
727#
728# * Any positive integer representing retry count.
729# (integer value)
730# Minimum value: 0
731#live_migration_retry_count = 30
732
733#
734# This option specifies whether to start guests that were running
735# before the
736# host rebooted. It ensures that all of the instances on a Nova
737# compute node
738# resume their state each time the compute node boots or restarts.
739# (boolean value)
740#resume_guests_state_on_host_boot = {{ controller.get('resume_guests_state_on_host_boot', True) }}
741
742#
743# Number of times to retry network allocation. It is required to
744# attempt network
745# allocation retries if the virtual interface plug fails.
746#
747# Possible values:
748#
749# * Any positive integer representing retry count.
750# (integer value)
751# Minimum value: 0
752#network_allocate_retries = 0
753
754#
755# Limits the maximum number of instance builds to run concurrently by
756# nova-compute. Compute service can attempt to build an infinite
757# number of
758# instances, if asked to do so. This limit is enforced to avoid
759# building
760# unlimited instance concurrently on a compute node. This value can be
761# set
762# per compute node.
763#
764# Possible Values:
765#
766# * 0 : treated as unlimited.
767# * Any positive integer representing maximum concurrent builds.
768# (integer value)
769# Minimum value: 0
770#max_concurrent_builds = 10
771
772#
773# Maximum number of live migrations to run concurrently. This limit is
774# enforced
775# to avoid outbound live migrations overwhelming the host/network and
776# causing
777# failures. It is not recommended that you change this unless you are
778# very sure
779# that doing so is safe and stable in your environment.
780#
781# Possible values:
782#
783# * 0 : treated as unlimited.
784# * Negative value defaults to 0.
785# * Any positive integer representing maximum number of live
786# migrations
787# to run concurrently.
788# (integer value)
789#max_concurrent_live_migrations = 1
790
791#
792# Number of times to retry block device allocation on failures.
793# Starting with
794# Liberty, Cinder can use image volume cache. This may help with block
795# device
796# allocation performance. Look at the cinder
797# image_volume_cache_enabled
798# configuration option.
799#
800# Possible values:
801#
802# * 60 (default)
803# * If value is 0, then one attempt is made.
804# * Any negative value is treated as 0.
805# * For any value > 0, total attempts are (value + 1)
806# (integer value)
807block_device_allocate_retries = {{ controller.get('block_device_allocate_retries', '600') }}
808
809#
810# Number of greenthreads available for use to sync power states.
811#
812# This option can be used to reduce the number of concurrent requests
813# made to the hypervisor or system with real instance power states
814# for performance reasons, for example, with Ironic.
815#
816# Possible values:
817#
818# * Any positive integer representing greenthreads count.
819# (integer value)
820#sync_power_state_pool_size = 1000
821
822#
823# Number of seconds to wait between runs of the image cache manager.
824#
825# Possible values:
826# * 0: run at the default rate.
827# * -1: disable
828# * Any other value
829# (integer value)
830# Minimum value: -1
831#image_cache_manager_interval = 0
832
833#
834# Interval to pull network bandwidth usage info.
835#
836# Not supported on all hypervisors. If a hypervisor doesn't support
837# bandwidth
838# usage, it will not get the info in the usage events.
839#
840# Possible values:
841#
842# * 0: Will run at the default periodic interval.
843# * Any value < 0: Disables the option.
844# * Any positive integer in seconds.
845# (integer value)
846#bandwidth_poll_interval = 600
847
848#
849# Interval to sync power states between the database and the
850# hypervisor.
851#
852# The interval that Nova checks the actual virtual machine power state
853# and the power state that Nova has in its database. If a user powers
854# down their VM, Nova updates the API to report the VM has been
855# powered down. Should something turn on the VM unexpectedly,
856# Nova will turn the VM back off to keep the system in the expected
857# state.
858#
859# Possible values:
860#
861# * 0: Will run at the default periodic interval.
862# * Any value < 0: Disables the option.
863# * Any positive integer in seconds.
864#
865# Related options:
866#
867# * If ``handle_virt_lifecycle_events`` in workarounds_group is
868# false and this option is negative, then instances that get out
869# of sync between the hypervisor and the Nova database will have
870# to be synchronized manually.
871# (integer value)
872#sync_power_state_interval = 600
873
874#
875# Interval between instance network information cache updates.
876#
877# Number of seconds after which each compute node runs the task of
878# querying Neutron for all of its instances networking information,
879# then updates the Nova db with that information. Nova will never
880# update its cache if this option is set to 0. If we don't update the
881# cache, the metadata service and nova-api endpoints will be proxying
882# incorrect network data about the instance. So, it is not recommended
883# to set this option to 0.
884#
885# Possible values:
886#
887# * Any positive integer in seconds.
888# * Any value <=0 will disable the sync. This is not recommended.
889# (integer value)
890#heal_instance_info_cache_interval = 60
891{%- if controller.heal_instance_info_cache_interval is defined %}
892heal_instance_info_cache_interval = {{ controller.heal_instance_info_cache_interval }}
893{%- endif %}
894
895#
896# Interval for reclaiming deleted instances.
897#
898# A value greater than 0 will enable SOFT_DELETE of instances.
899# This option decides whether the server to be deleted will be put
900# into
901# the SOFT_DELETED state. If this value is greater than 0, the deleted
902# server will not be deleted immediately, instead it will be put into
903# a queue until it's too old (deleted time greater than the value of
904# reclaim_instance_interval). The server can be recovered from the
905# delete queue by using the restore action. If the deleted server
906# remains
907# longer than the value of reclaim_instance_interval, it will be
908# deleted by a periodic task in the compute service automatically.
909#
910# Note that this option is read from both the API and compute nodes,
911# and
912# must be set globally otherwise servers could be put into a soft
913# deleted
914# state in the API and never actually reclaimed (deleted) on the
915# compute
916# node.
917#
918# Possible values:
919#
920# * Any positive integer(in seconds) greater than 0 will enable
921# this option.
922# * Any value <=0 will disable the option.
923# (integer value)
924#reclaim_instance_interval = 0
925{%- if controller.reclaim_instance_interval is defined %}
926reclaim_instance_interval = {{ controller.reclaim_instance_interval }}
927{%- endif %}
928
929#
930# Interval for gathering volume usages.
931#
932# This option updates the volume usage cache for every
933# volume_usage_poll_interval number of seconds.
934#
935# Possible values:
936#
937# * Any positive integer(in seconds) greater than 0 will enable
938# this option.
939# * Any value <=0 will disable the option.
940# (integer value)
941#volume_usage_poll_interval = 0
942
943#
944# Interval for polling shelved instances to offload.
945#
946# The periodic task runs for every shelved_poll_interval number
947# of seconds and checks if there are any shelved instances. If it
948# finds a shelved instance, based on the 'shelved_offload_time' config
949# value it offloads the shelved instances. Check
950# 'shelved_offload_time'
951# config option description for details.
952#
953# Possible values:
954#
955# * Any value <= 0: Disables the option.
956# * Any positive integer in seconds.
957#
958# Related options:
959#
960# * ``shelved_offload_time``
961# (integer value)
962#shelved_poll_interval = 3600
963
964#
965# Time before a shelved instance is eligible for removal from a host.
966#
967# By default this option is set to 0 and the shelved instance will be
968# removed from the hypervisor immediately after shelve operation.
969# Otherwise, the instance will be kept for the value of
970# shelved_offload_time(in seconds) so that during the time period the
971# unshelve action will be faster; the periodic task will then remove
972# the instance from hypervisor after shelved_offload_time passes.
973#
974# Possible values:
975#
976# * 0: Instance will be immediately offloaded after being
977# shelved.
978# * Any value < 0: An instance will never offload.
979# * Any positive integer in seconds: The instance will exist for
980# the specified number of seconds before being offloaded.
981# (integer value)
982#shelved_offload_time = 0
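# Illustrative combination (not set by this template): keep shelved instances
# on the hypervisor for 12 hours (43200 s), checking hourly for candidates to
# offload.
#shelved_offload_time = 43200
#shelved_poll_interval = 3600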
983
984#
985# Interval for retrying failed instance file deletes.
986#
987# This option depends on 'maximum_instance_delete_attempts'.
988# This option specifies how often to retry deletes whereas
989# 'maximum_instance_delete_attempts' specifies the maximum number
990# of retry attempts that can be made.
991#
992# Possible values:
993#
994# * 0: Will run at the default periodic interval.
995# * Any value < 0: Disables the option.
996# * Any positive integer in seconds.
997#
998# Related options:
999#
1000# * ``maximum_instance_delete_attempts`` from instance_cleaning_opts
1001# group.
1002# (integer value)
1003#instance_delete_interval = 300
1004
1005#
1006# Interval (in seconds) between block device allocation retries on
1007# failures.
1008#
1009# This option allows the user to specify the time interval between
1010# consecutive retries. 'block_device_allocate_retries' option
1011# specifies
1012# the maximum number of retries.
1013#
1014# Possible values:
1015#
1016# * 0: Disables the option.
1017# * Any positive integer in seconds enables the option.
1018#
1019# Related options:
1020#
1021# * ``block_device_allocate_retries`` in compute_manager_opts group.
1022# (integer value)
1023# Minimum value: 0
1024block_device_allocate_retries_interval = {{ controller.get('block_device_allocate_retries_interval', '10') }}
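# With the defaults rendered by this template (block_device_allocate_retries
# set to 600 earlier in this file and the 10 second interval above), nova
# keeps retrying block device allocation for roughly 600 * 10 s = 6000 s,
# i.e. about 100 minutes, before failing the request.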
1025
1026#
1027# Interval between sending the scheduler a list of current instance
1028# UUIDs to
1029# verify that its view of instances is in sync with nova.
1030#
1031# If the CONF option 'scheduler_tracks_instance_changes' is
1032# False, the sync calls will not be made. So, changing this option
1033# will
1034# have no effect.
1035#
1036# If the out of sync situations are not very common, this interval
1037# can be increased to lower the number of RPC messages being sent.
1038# Likewise, if sync issues turn out to be a problem, the interval
1039# can be lowered to check more frequently.
1040#
1041# Possible values:
1042#
1043# * 0: Will run at the default periodic interval.
1044# * Any value < 0: Disables the option.
1045# * Any positive integer in seconds.
1046#
1047# Related options:
1048#
1049# * This option has no impact if ``scheduler_tracks_instance_changes``
1050# is set to False.
1051# (integer value)
1052#scheduler_instance_sync_interval = 120
1053
1054#
1055# Interval for updating compute resources.
1056#
1057# This option specifies how often the update_available_resources
1058# periodic task should run. A number less than 0 means to disable the
1059# task completely. Leaving this at the default of 0 will cause this to
1060# run at the default periodic interval. Setting it to any positive
1061# value will cause it to run at approximately that number of seconds.
1062#
1063# Possible values:
1064#
1065# * 0: Will run at the default periodic interval.
1066# * Any value < 0: Disables the option.
1067# * Any positive integer in seconds.
1068# (integer value)
1069#update_resources_interval = 0
1070
1071#
1072# Time interval after which an instance is hard rebooted
1073# automatically.
1074#
1075# When doing a soft reboot, it is possible that a guest kernel is
1076# completely hung in a way that causes the soft reboot task
1077# to not ever finish. Setting this option to a time period in seconds
1078# will automatically hard reboot an instance if it has been stuck
1079# in a rebooting state longer than N seconds.
1080#
1081# Possible values:
1082#
1083# * 0: Disables the option (default).
1084# * Any positive integer in seconds: Enables the option.
1085# (integer value)
1086# Minimum value: 0
1087#reboot_timeout = 0
1088
1089#
1090# Maximum time in seconds that an instance can take to build.
1091#
1092# If this timer expires, instance status will be changed to ERROR.
1093# Enabling this option will make sure an instance will not be stuck
1094# in BUILD state for a longer period.
1095#
1096# Possible values:
1097#
1098# * 0: Disables the option (default)
1099# * Any positive integer in seconds: Enables the option.
1100# (integer value)
1101# Minimum value: 0
1102#instance_build_timeout = 0
1103
1104#
1105# Interval to wait before un-rescuing an instance stuck in RESCUE.
1106#
1107# Possible values:
1108#
1109# * 0: Disables the option (default)
1110# * Any positive integer in seconds: Enables the option.
1111# (integer value)
1112# Minimum value: 0
1113#rescue_timeout = 0
1114
1115#
1116# Automatically confirm resizes after N seconds.
1117#
1118# Resize functionality will save the existing server before resizing.
1119# After the resize completes, user is requested to confirm the resize.
1120# The user has the opportunity to either confirm or revert all
1121# changes. Confirm resize removes the original server and changes
1122# server status from resized to active. Setting this option to a time
1123# period (in seconds) will automatically confirm the resize if the
1124# server is in resized state longer than that time.
1125#
1126# Possible values:
1127#
1128# * 0: Disables the option (default)
1129# * Any positive integer in seconds: Enables the option.
1130# (integer value)
1131# Minimum value: 0
1132#resize_confirm_window = 0
1133
1134#
1135# Total time to wait in seconds for an instance to perform a clean
1136# shutdown.
1137#
1138# It determines the overall period (in seconds) a VM is allowed to
1139# perform a clean shutdown. While performing stop, rescue, shelve, and
1140# rebuild operations, configuring this option gives the VM a chance
1141# to perform a controlled shutdown before the instance is powered off.
1142# The default timeout is 60 seconds.
1143#
1144# The timeout value can be overridden on a per image basis by means
1145# of os_shutdown_timeout that is an image metadata setting allowing
1146# different types of operating systems to specify how much time they
1147# need to shut down cleanly.
1148#
1149# Possible values:
1150#
1151# * Any positive integer in seconds (default value is 60).
1152# (integer value)
1153# Minimum value: 1
1154#shutdown_timeout = 60
1155
1156#
1157# The compute service periodically checks for instances that have been
1158# deleted in the database but remain running on the compute node. The
1159# above option enables action to be taken when such instances are
1160# identified.
1161#
1162# Possible values:
1163#
1164# * reap: Powers down the instances and deletes them(default)
1165# * log: Logs warning message about deletion of the resource
1166# * shutdown: Powers down instances and marks them as non-
1167# bootable which can be later used for debugging/analysis
1168# * noop: Takes no action
1169#
1170# Related options:
1171#
1172# * running_deleted_instance_poll_interval
1173# * running_deleted_instance_timeout
1174# (string value)
1175# Possible values:
1176# noop - <No description provided>
1177# log - <No description provided>
1178# shutdown - <No description provided>
1179# reap - <No description provided>
1180#running_deleted_instance_action = reap
1181
1182#
1183# Time interval in seconds to wait between runs for the clean up
1184# action.
1185# If set to 0, above check will be disabled. If
1186# "running_deleted_instance
1187# _action" is set to "log" or "reap", a value greater than 0 must be
1188# set.
1189#
1190# Possible values:
1191#
1192# * Any positive integer in seconds enables the option.
1193# * 0: Disables the option.
1194# * 1800: Default value.
1195#
1196# Related options:
1197#
1198# * running_deleted_instance_action
1199# (integer value)
1200#running_deleted_instance_poll_interval = 1800
1201
1202#
1203# Time interval in seconds to wait for the instances that have
1204# been marked as deleted in database to be eligible for cleanup.
1205#
1206# Possible values:
1207#
1208# * Any positive integer in seconds(default is 0).
1209#
1210# Related options:
1211#
1212# * "running_deleted_instance_action"
1213# (integer value)
1214#running_deleted_instance_timeout = 0
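# Illustrative combination (not set by this template) of the three related
# options documented above: power down and delete leftover instances half an
# hour after they are marked deleted in the database, checking every 30
# minutes.
#running_deleted_instance_action = reap
#running_deleted_instance_poll_interval = 1800
#running_deleted_instance_timeout = 1800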
1215
1216#
1217# The number of times to attempt to reap an instance's files.
1218#
1219# This option specifies the maximum number of retry attempts
1220# that can be made.
1221#
1222# Possible values:
1223#
1224# * Any positive integer defines how many attempts are made.
1225# * Any value <=0 means no delete attempts occur, but you should use
1226# ``instance_delete_interval`` to disable the delete attempts.
1227#
1228# Related options:
1229# * ``instance_delete_interval`` in interval_opts group can be used to
1230# disable
1231# this option.
1232# (integer value)
1233#maximum_instance_delete_attempts = 5
1234
1235#
1236# Sets the scope of the check for unique instance names.
1237#
1238# The default doesn't check for unique names. If a scope for the name
1239# check is
1240# set, a launch of a new instance or an update of an existing instance
1241# with a
1242# duplicate name will result in an ''InstanceExists'' error. The
1243# uniqueness is
1244# case-insensitive. Setting this option can increase the usability for
1245# end
1246# users as they don't have to distinguish among instances with the
1247# same name
1248# by their IDs.
1249#
1250# Possible values:
1251#
1252# * '': An empty value means that no uniqueness check is done and
1253# duplicate
1254# names are possible.
1255# * "project": The instance name check is done only for instances
1256# within the
1257# same project.
1258# * "global": The instance name check is done for all instances
1259# regardless of
1260# the project.
1261# (string value)
1262# Possible values:
1263# '' - <No description provided>
1264# project - <No description provided>
1265# global - <No description provided>
1266#osapi_compute_unique_server_name_scope =
1267
1268#
1269# Enable new nova-compute services on this host automatically.
1270#
1271# When a new nova-compute service starts up, it gets
1272# registered in the database as an enabled service. Sometimes it can
1273# be useful
1274# to register new compute services in disabled state and then enabled
1275# them at a
1276# later point in time. This option only sets this behavior for nova-
1277# compute
1278# services, it does not auto-disable other services like nova-
1279# conductor,
1280# nova-scheduler, nova-consoleauth, or nova-osapi_compute.
1281#
1282# Possible values:
1283#
1284# * ``True``: Each new compute service is enabled as soon as it
1285# registers itself.
1286# * ``False``: Compute services must be enabled via an os-services
1287# REST API call
1288# or with the CLI with ``nova service-enable <hostname> <binary>``,
1289# otherwise
1290# they are not ready to use.
1291# (boolean value)
1292#enable_new_services = true
1293
1294#
1295# Template string to be used to generate instance names.
1296#
1297# This template controls the creation of the database name of an
1298# instance. This
1299# is *not* the display name you enter when creating an instance (via
1300# Horizon
1301# or CLI). For a new deployment it is advisable to change the default
1302# value
1303# (which uses the database autoincrement) to another value which makes
1304# use
1305# of the attributes of an instance, like ``instance-%(uuid)s``. If you
1306# already have instances in your deployment when you change this, your
1307# deployment will break.
1308#
1309# Possible values:
1310#
1311# * A string which either uses the instance database ID (like the
1312# default)
1313# * A string with a list of named database columns, for example
1314# ``%(id)d``
1315# or ``%(uuid)s`` or ``%(hostname)s``.
1316#
1317# Related options:
1318#
1319# * not to be confused with: ``multi_instance_display_name_template``
1320# (string value)
1321#instance_name_template = instance-%08x
1322
1323#
1324# Number of times to retry live-migration before failing.
1325#
1326# Possible values:
1327#
1328# * If == -1, try until out of hosts (default)
1329# * If == 0, only try once, no retries
1330# * Integer greater than 0
1331# (integer value)
1332# Minimum value: -1
1333#migrate_max_retries = -1
1334
1335#
1336# Configuration drive format
1337#
1338# Configuration drive format that will contain metadata attached to
1339# the
1340# instance when it boots.
1341#
1342# Possible values:
1343#
1344# * iso9660: A file system image standard that is widely supported
1345# across
1346# operating systems. NOTE: Mind the libvirt bug
1347# (https://bugs.launchpad.net/nova/+bug/1246201) - If your
1348# hypervisor
1349# driver is libvirt, and you want live migrate to work without
1350# shared storage,
1351# then use VFAT.
1352# * vfat: For legacy reasons, you can configure the configuration
1353# drive to
1354# use VFAT format instead of ISO 9660.
1355#
1356# Related options:
1357#
1358# * This option is meaningful when one of the following alternatives
1359# occur:
1360# 1. force_config_drive option set to 'true'
1361# 2. the REST API call to create the instance contains an enable
1362# flag for
1363# config drive option
1364# 3. the image used to create the instance requires a config drive,
1365# this is defined by img_config_drive property for that image.
1366# * A compute node running Hyper-V hypervisor can be configured to
1367# attach
1368# configuration drive as a CD drive. To attach the configuration
1369# drive as a CD
1370# drive, set config_drive_cdrom option at hyperv section, to true.
1371# (string value)
1372# Possible values:
1373# iso9660 - <No description provided>
1374# vfat - <No description provided>
1375#config_drive_format = iso9660
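# Illustrative override (not set by this template): per the libvirt note
# above, VFAT can be used when live migration without shared storage is
# required.
#config_drive_format = vfat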
1376
1377#
1378# Force injection to take place on a config drive
1379#
1380# When this option is set to true configuration drive functionality
1381# will be
1382# forced enabled by default, otherwise user can still enable
1383# configuration
1384# drives via the REST API or image metadata properties.
1385#
1386# Possible values:
1387#
1388# * True: Force to use of configuration drive regardless the user's
1389# input in the
1390# REST API call.
1391# * False: Do not force use of configuration drive. Config drives can
1392# still be
1393# enabled via the REST API or image metadata properties.
1394#
1395# Related options:
1396#
1397# * Use the 'mkisofs_cmd' flag to set the path where you install the
1398# genisoimage program. If genisoimage is in same path as the
1399# nova-compute service, you do not need to set this flag.
1400# * To use configuration drive with Hyper-V, you must set the
1401# 'mkisofs_cmd' value to the full path to an mkisofs.exe
1402# installation.
1403# Additionally, you must set the qemu_img_cmd value in the hyperv
1404# configuration section to the full path to a qemu-img command
1405# installation.
1406# (boolean value)
1407#force_config_drive = false
1408
1409#
1410# Name or path of the tool used for ISO image creation
1411#
1412# Use the mkisofs_cmd flag to set the path where you install the
1413# genisoimage
1414# program. If genisoimage is on the system path, you do not need to
1415# change
1416# the default value.
1417#
1418# To use configuration drive with Hyper-V, you must set the
1419# mkisofs_cmd value
1420# to the full path to an mkisofs.exe installation. Additionally, you
1421# must set
1422# the qemu_img_cmd value in the hyperv configuration section to the
1423# full path
1424# to a qemu-img command installation.
1425#
1426# Possible values:
1427#
1428# * Name of the ISO image creator program, in case it is in the same
1429# directory
1430# as the nova-compute service
1431# * Path to ISO image creator program
1432#
1433# Related options:
1434#
1435# * This option is meaningful when config drives are enabled.
1436# * To use configuration drive with Hyper-V, you must set the
1437# qemu_img_cmd
1438# value in the hyperv configuration section to the full path to an
1439# qemu-img
1440# command installation.
1441# (string value)
1442#mkisofs_cmd = genisoimage
1443
1444# DEPRECATED: The driver to use for database access (string value)
1445# This option is deprecated for removal since 13.0.0.
1446# Its value may be silently ignored in the future.
1447#db_driver = nova.db
1448
1449# DEPRECATED:
1450# Default flavor to use for the EC2 API only.
1451# The Nova API does not support a default flavor.
1452# (string value)
1453# This option is deprecated for removal since 14.0.0.
1454# Its value may be silently ignored in the future.
1455# Reason: The EC2 API is deprecated.
1456#default_flavor = m1.small
1457
1458#
1459# The IP address which the host is using to connect to the management
1460# network.
1461#
1462# Possible values:
1463#
1464# * String with valid IP address. Default is IPv4 address of this
1465# host.
1466#
1467# Related options:
1468#
1469# * metadata_host
1470# * my_block_storage_ip
1471# * routing_source_ip
1472# * vpn_ip
1473# (string value)
1474#my_ip = <host_ipv4>
1475my_ip={{ controller.bind.private_address }}
1476
1477#
1478# The IP address which is used to connect to the block storage
1479# network.
1480#
1481# Possible values:
1482#
1483# * String with valid IP address. Default is IP address of this host.
1484#
1485# Related options:
1486#
1487# * my_ip - if my_block_storage_ip is not set, then my_ip value is
1488# used.
1489# (string value)
1490#my_block_storage_ip = $my_ip
1491
1492#
1493# Hostname, FQDN or IP address of this host.
1494#
1495# Used as:
1496#
1497# * the oslo.messaging queue name for nova-compute worker
1498# * we use this value for the binding_host sent to neutron. This means
1499# if you use
1500# a neutron agent, it should have the same value for host.
1501# * cinder host attachment information
1502#
1503# Must be valid within AMQP key.
1504#
1505# Possible values:
1506#
1507# * String with hostname, FQDN or IP address. Default is hostname of
1508# this host.
1509# (string value)
1510#host = <current_hostname>
1511{%- if controller.host is defined %}
1512host={{ controller.host }}
1513{%- endif %}
1514
1515# DEPRECATED:
1516# This option is a list of full paths to one or more configuration
1517# files for
1518# dhcpbridge. In most cases the default path of '/etc/nova/nova-
1519# dhcpbridge.conf'
1520# should be sufficient, but if you have special needs for configuring
1521# dhcpbridge,
1522# you can change or add to this list.
1523#
1524# Possible values
1525#
1526# * A list of strings, where each string is the full path to a
1527# dhcpbridge
1528# configuration file.
1529# (multi valued)
1530# This option is deprecated for removal since 16.0.0.
1531# Its value may be silently ignored in the future.
1532# Reason:
1533# nova-network is deprecated, as are any related configuration
1534# options.
1535#dhcpbridge_flagfile = /etc/nova/nova.conf
1536
1537# DEPRECATED:
1538# The location where the network configuration files will be kept. The
1539# default is
1540# the 'networks' directory off of the location where nova's Python
1541# module is
1542# installed.
1543#
1544# Possible values
1545#
1546# * A string containing the full path to the desired configuration
1547# directory
1548# (string value)
1549# This option is deprecated for removal since 16.0.0.
1550# Its value may be silently ignored in the future.
1551# Reason:
1552# nova-network is deprecated, as are any related configuration
1553# options.
1554#networks_path = $state_path/networks
1555
1556# DEPRECATED:
1557# This is the name of the network interface for public IP addresses.
1558# The default
1559# is 'eth0'.
1560#
1561# Possible values:
1562#
1563# * Any string representing a network interface name
1564# (string value)
1565# This option is deprecated for removal since 16.0.0.
1566# Its value may be silently ignored in the future.
1567# Reason:
1568# nova-network is deprecated, as are any related configuration
1569# options.
1570#public_interface = eth0
1571
1572# DEPRECATED:
1573# The location of the binary nova-dhcpbridge. By default it is the
1574# binary named
1575# 'nova-dhcpbridge' that is installed with all the other nova
1576# binaries.
1577#
1578# Possible values:
1579#
1580# * Any string representing the full path to the binary for dhcpbridge
1581# (string value)
1582# This option is deprecated for removal since 16.0.0.
1583# Its value may be silently ignored in the future.
1584# Reason:
1585# nova-network is deprecated, as are any related configuration
1586# options.
1587#dhcpbridge = $bindir/nova-dhcpbridge
1588
1589# DEPRECATED:
1590# The public IP address of the network host.
1591#
1592# This is used when creating an SNAT rule.
1593#
1594# Possible values:
1595#
1596# * Any valid IP address
1597#
1598# Related options:
1599#
1600# * ``force_snat_range``
1601# (string value)
1602# This option is deprecated for removal since 16.0.0.
1603# Its value may be silently ignored in the future.
1604# Reason:
1605# nova-network is deprecated, as are any related configuration
1606# options.
1607#routing_source_ip = $my_ip
1608
1609# DEPRECATED:
1610# The lifetime of a DHCP lease, in seconds. The default is 86400 (one
1611# day).
1612#
1613# Possible values:
1614#
1615# * Any positive integer value.
1616# (integer value)
1617# Minimum value: 1
1618# This option is deprecated for removal since 16.0.0.
1619# Its value may be silently ignored in the future.
1620# Reason:
1621# nova-network is deprecated, as are any related configuration
1622# options.
1623#dhcp_lease_time = 86400
1624
1625# DEPRECATED:
1626# Despite the singular form of the name of this option, it is actually
1627# a list of
1628# zero or more server addresses that dnsmasq will use for DNS
1629# nameservers. If
1630# this is not empty, dnsmasq will not read /etc/resolv.conf, but will
1631# only use
1632# the servers specified in this option. If the option
1633# use_network_dns_servers is
1634# True, the dns1 and dns2 servers from the network will be appended to
1635# this list,
1636# and will be used as DNS servers, too.
1637#
1638# Possible values:
1639#
1640# * A list of strings, where each string is either an IP address or a
1641# FQDN.
1642#
1643# Related options:
1644#
1645# * ``use_network_dns_servers``
1646# (multi valued)
1647# This option is deprecated for removal since 16.0.0.
1648# Its value may be silently ignored in the future.
1649# Reason:
1650# nova-network is deprecated, as are any related configuration
1651# options.
1652#dns_server =
1653
1654# DEPRECATED:
1655# When this option is set to True, the dns1 and dns2 servers for the
1656# network
1657# specified by the user on boot will be used for DNS, as well as any
1658# specified in
1659# the `dns_server` option.
1660#
1661# Related options:
1662#
1663# * ``dns_server``
1664# (boolean value)
1665# This option is deprecated for removal since 16.0.0.
1666# Its value may be silently ignored in the future.
1667# Reason:
1668# nova-network is deprecated, as are any related configuration
1669# options.
1670#use_network_dns_servers = false
1671
1672# DEPRECATED:
1673# This option is a list of zero or more IP address ranges in your
1674# network's DMZ
1675# that should be accepted.
1676#
1677# Possible values:
1678#
1679# * A list of strings, each of which should be a valid CIDR.
1680# (list value)
1681# This option is deprecated for removal since 16.0.0.
1682# Its value may be silently ignored in the future.
1683# Reason:
1684# nova-network is deprecated, as are any related configuration
1685# options.
1686#dmz_cidr =
1687
1688# DEPRECATED:
1689# This is a list of zero or more IP ranges that traffic from the
1690# `routing_source_ip` will be SNATted to. If the list is empty, then
1691# no SNAT
1692# rules are created.
1693#
1694# Possible values:
1695#
1696# * A list of strings, each of which should be a valid CIDR.
1697#
1698# Related options:
1699#
1700# * ``routing_source_ip``
1701# (multi valued)
1702# This option is deprecated for removal since 16.0.0.
1703# Its value may be silently ignored in the future.
1704# Reason:
1705# nova-network is deprecated, as are any related configuration
1706# options.
1707#force_snat_range =
1708
1709# DEPRECATED:
1710# The path to the custom dnsmasq configuration file, if any.
1711#
1712# Possible values:
1713#
1714# * The full path to the configuration file, or an empty string if
1715# there is no
1716# custom dnsmasq configuration file.
1717# (string value)
1718# This option is deprecated for removal since 16.0.0.
1719# Its value may be silently ignored in the future.
1720# Reason:
1721# nova-network is deprecated, as are any related configuration
1722# options.
1723#dnsmasq_config_file =
1724
1725# DEPRECATED:
1726# This is the class used as the ethernet device driver for linuxnet
1727# bridge
1728# operations. The default value should be all you need for most cases,
1729# but if you
1730# wish to use a customized class, set this option to the full dot-
1731# separated
1732# import path for that class.
1733#
1734# Possible values:
1735#
1736# * Any string representing a dot-separated class path that Nova can
1737# import.
1738# (string value)
1739# This option is deprecated for removal since 16.0.0.
1740# Its value may be silently ignored in the future.
1741# Reason:
1742# nova-network is deprecated, as are any related configuration
1743# options.
1744#linuxnet_interface_driver = nova.network.linux_net.LinuxBridgeInterfaceDriver
1745
1746# DEPRECATED:
1747# The name of the Open vSwitch bridge that is used with linuxnet when
1748# connecting
1749# with Open vSwitch.
1750#
1751# Possible values:
1752#
1753# * Any string representing a valid bridge name.
1754# (string value)
1755# This option is deprecated for removal since 16.0.0.
1756# Its value may be silently ignored in the future.
1757# Reason:
1758# nova-network is deprecated, as are any related configuration
1759# options.
1760#linuxnet_ovs_integration_bridge = br-int
1761
1762#
1763# When True, when a device starts up, and upon binding floating IP
1764# addresses, arp
1765# messages will be sent to ensure that the arp caches on the compute
1766# hosts are
1767# up-to-date.
1768#
1769# Related options:
1770#
1771# * ``send_arp_for_ha_count``
1772# (boolean value)
1773#send_arp_for_ha = false
1774
1775#
1776# When arp messages are configured to be sent, they will be sent with
1777# the count
1778# set to the value of this option. Of course, if this is set to zero,
1779# no arp
1780# messages will be sent.
1781#
1782# Possible values:
1783#
1784# * Any integer greater than or equal to 0
1785#
1786# Related options:
1787#
1788# * ``send_arp_for_ha``
1789# (integer value)
1790#send_arp_for_ha_count = 3
1791
1792# DEPRECATED:
1793# When set to True, only the first NIC of a VM will get its default
1794# gateway from
1795# the DHCP server.
1796# (boolean value)
1797# This option is deprecated for removal since 16.0.0.
1798# Its value may be silently ignored in the future.
1799# Reason:
1800# nova-network is deprecated, as are any related configuration
1801# options.
1802#use_single_default_gateway = false
1803
1804# DEPRECATED:
1805# One or more interfaces that bridges can forward traffic to. If any
1806# of the items
1807# in this list is the special keyword 'all', then all traffic will be
1808# forwarded.
1809#
1810# Possible values:
1811#
1812# * A list of zero or more interface names, or the word 'all'.
1813# (multi valued)
1814# This option is deprecated for removal since 16.0.0.
1815# Its value may be silently ignored in the future.
1816# Reason:
1817# nova-network is deprecated, as are any related configuration
1818# options.
1819#forward_bridge_interface = all
1820
1821#
1822# This option determines the IP address for the network metadata API
1823# server.
1824#
1825# This is really the client side of the metadata host equation that
1826# allows
1827# nova-network to find the metadata server when using default
1828# multi-host
1829# networking.
1830#
1831# Possible values:
1832#
1833# * Any valid IP address. The default is the address of the Nova API
1834# server.
1835#
1836# Related options:
1837#
1838# * ``metadata_port``
1839# (string value)
1840#metadata_host = $my_ip
1841
1842# DEPRECATED:
1843# This option determines the port used for the metadata API server.
1844#
1845# Related options:
1846#
1847# * ``metadata_host``
1848# (port value)
1849# Minimum value: 0
1850# Maximum value: 65535
1851# This option is deprecated for removal since 16.0.0.
1852# Its value may be silently ignored in the future.
1853# Reason:
1854# nova-network is deprecated, as are any related configuration
1855# options.
1856#metadata_port = 8775
1857
1858# DEPRECATED:
1859# This expression, if defined, will select any matching iptables rules
1860# and place
1861# them at the top when applying metadata changes to the rules.
1862#
1863# Possible values:
1864#
1865# * Any string representing a valid regular expression, or an empty
1866# string
1867#
1868# Related options:
1869#
1870# * ``iptables_bottom_regex``
1871# (string value)
1872# This option is deprecated for removal since 16.0.0.
1873# Its value may be silently ignored in the future.
1874# Reason:
1875# nova-network is deprecated, as are any related configuration
1876# options.
1877#iptables_top_regex =
1878
1879# DEPRECATED:
1880# This expression, if defined, will select any matching iptables rules
1881# and place
1882# them at the bottom when applying metadata changes to the rules.
1883#
1884# Possible values:
1885#
1886# * Any string representing a valid regular expression, or an empty
1887# string
1888#
1889# Related options:
1890#
1891# * iptables_top_regex
1892# (string value)
1893# This option is deprecated for removal since 16.0.0.
1894# Its value may be silently ignored in the future.
1895# Reason:
1896# nova-network is deprecated, as are any related configuration
1897# options.
1898#iptables_bottom_regex =
1899
1900# DEPRECATED:
1901# By default, packets that do not pass the firewall are DROPped. In
1902# many cases,
1903# though, an operator may find it more useful to change this from DROP
1904# to REJECT,
1905# so that the user issuing those packets may have a better idea as to
1906# what's
1907# going on, or LOGDROP in order to record the blocked traffic before
1908# DROPping.
1909#
1910# Possible values:
1911#
1912# * A string representing an iptables chain. The default is DROP.
1913# (string value)
1914# This option is deprecated for removal since 16.0.0.
1915# Its value may be silently ignored in the future.
1916# Reason:
1917# nova-network is deprecated, as are any related configuration
1918# options.
1919#iptables_drop_action = DROP
1920
1921# DEPRECATED:
1922# This option represents the period of time, in seconds, that the
1923# ovs_vsctl calls
1924# will wait for a response from the database before timing out. A
1925# setting of 0
1926# means that the utility should wait forever for a response.
1927#
1928# Possible values:
1929#
1930# * Any positive integer if a limited timeout is desired, or zero if
1931# the calls
1932# should wait forever for a response.
1933# (integer value)
1934# Minimum value: 0
1935# This option is deprecated for removal since 16.0.0.
1936# Its value may be silently ignored in the future.
1937# Reason:
1938# nova-network is deprecated, as are any related configuration
1939# options.
1940#ovs_vsctl_timeout = 120
1941
1942# DEPRECATED:
1943# This option is used mainly in testing to avoid calls to the
1944# underlying network
1945# utilities.
1946# (boolean value)
1947# This option is deprecated for removal since 16.0.0.
1948# Its value may be silently ignored in the future.
1949# Reason:
1950# nova-network is deprecated, as are any related configuration
1951# options.
1952#fake_network = false
1953
1954# DEPRECATED:
1955# This option determines the number of times to retry ebtables
1956# commands before
1957# giving up. The minimum number of retries is 1.
1958#
1959# Possible values:
1960#
1961# * Any positive integer
1962#
1963# Related options:
1964#
1965# * ``ebtables_retry_interval``
1966# (integer value)
1967# Minimum value: 1
1968# This option is deprecated for removal since 16.0.0.
1969# Its value may be silently ignored in the future.
1970# Reason:
1971# nova-network is deprecated, as are any related configuration
1972# options.
1973#ebtables_exec_attempts = 3
1974
1975# DEPRECATED:
1976# This option determines the time, in seconds, that the system will
1977# sleep in
1978# between ebtables retries. Note that each successive retry waits a
1979# multiple of
1980# this value, so for example, if this is set to the default of 1.0
1981# seconds, and
1982# ebtables_exec_attempts is 4, after the first failure, the system
1983# will sleep for
1984# 1 * 1.0 seconds, after the second failure it will sleep 2 * 1.0
1985# seconds, and
1986# after the third failure it will sleep 3 * 1.0 seconds.
1987#
1988# Possible values:
1989#
1990# * Any non-negative float or integer. Setting this to zero will
1991# result in no
1992# waiting between attempts.
1993#
1994# Related options:
1995#
1996# * ebtables_exec_attempts
1997# (floating point value)
1998# This option is deprecated for removal since 16.0.0.
1999# Its value may be silently ignored in the future.
2000# Reason:
2001# nova-network is deprecated, as are any related configuration
2002# options.
2003#ebtables_retry_interval = 1.0
2004
2005# DEPRECATED:
2006# Enable neutron as the backend for networking.
2007#
2008# Determine whether to use Neutron or Nova Network as the back end.
2009# Set to true
2010# to use neutron.
2011# (boolean value)
2012# This option is deprecated for removal since 15.0.0.
2013# Its value may be silently ignored in the future.
2014# Reason:
2015# nova-network is deprecated, as are any related configuration
2016# options.
2017use_neutron = true
2018
2019#
2020# This option determines whether the network setup information is
2021# injected into
2022# the VM before it is booted. While it was originally designed to be
2023# used only
2024# by nova-network, it is also used by the vmware and xenapi virt
2025# drivers to
2026# control whether network information is injected into a VM. The
2027# libvirt virt
2028# driver also uses it when config_drive is used to configure the
2029# network, to control
2030# whether network information is injected into a VM.
2031# (boolean value)
2032#flat_injected = false
2033
2034# DEPRECATED:
2035# This option determines the bridge used for simple network interfaces
2036# when no
2037# bridge is specified in the VM creation request.
2038#
2039# Please note that this option is only used when using nova-network
2040# instead of
2041# Neutron in your deployment.
2042#
2043# Possible values:
2044#
2045# * Any string representing a valid network bridge, such as 'br100'
2046#
2047# Related options:
2048#
2049# * ``use_neutron``
2050# (string value)
2051# This option is deprecated for removal since 15.0.0.
2052# Its value may be silently ignored in the future.
2053# Reason:
2054# nova-network is deprecated, as are any related configuration
2055# options.
2056#flat_network_bridge = <None>
2057
2058# DEPRECATED:
2059# This is the address of the DNS server for a simple network. If this
2060# option is
2061# not specified, the default of '8.8.4.4' is used.
2062#
2063# Please note that this option is only used when using nova-network
2064# instead of
2065# Neutron in your deployment.
2066#
2067# Possible values:
2068#
2069# * Any valid IP address.
2070#
2071# Related options:
2072#
2073# * ``use_neutron``
2074# (string value)
2075# This option is deprecated for removal since 15.0.0.
2076# Its value may be silently ignored in the future.
2077# Reason:
2078# nova-network is deprecated, as are any related configuration
2079# options.
2080#flat_network_dns = 8.8.4.4
2081
2082# DEPRECATED:
2083# This option is the name of the virtual interface of the VM on which
2084# the bridge
2085# will be built. While it was originally designed to be used only by
2086# nova-network, it is also used by libvirt for the bridge interface
2087# name.
2088#
2089# Possible values:
2090#
2091# * Any valid virtual interface name, such as 'eth0'
2092# (string value)
2093# This option is deprecated for removal since 15.0.0.
2094# Its value may be silently ignored in the future.
2095# Reason:
2096# nova-network is deprecated, as are any related configuration
2097# options.
2098#flat_interface = <None>
2099
2100# DEPRECATED:
2101# This is the VLAN number used for private networks. Note that the
2102# when creating
2103# the networks, if the specified number has already been assigned,
2104# nova-network
2105# will increment this number until it finds an available VLAN.
2106#
2107# Please note that this option is only used when using nova-network
2108# instead of
2109# Neutron in your deployment. It also will be ignored if the
2110# configuration option
2111# for `network_manager` is not set to the default of
2112# 'nova.network.manager.VlanManager'.
2113#
2114# Possible values:
2115#
2116# * Any integer between 1 and 4094. Values outside of that range will
2117# raise a
2118# ValueError exception.
2119#
2120# Related options:
2121#
2122# * ``network_manager``
2123# * ``use_neutron``
2124# (integer value)
2125# Minimum value: 1
2126# Maximum value: 4094
2127# This option is deprecated for removal since 15.0.0.
2128# Its value may be silently ignored in the future.
2129# Reason:
2130# nova-network is deprecated, as are any related configuration
2131# options.
2132#vlan_start = 100
2133
2134# DEPRECATED:
2135# This option is the name of the virtual interface of the VM on which
2136# the VLAN
2137# bridge will be built. While it was originally designed to be used
2138# only by
2139# nova-network, it is also used by libvirt and xenapi for the bridge
2140# interface
2141# name.
2142#
2143# Please note that this setting will be ignored in nova-network if the
2144# configuration option for `network_manager` is not set to the default
2145# of
2146# 'nova.network.manager.VlanManager'.
2147#
2148# Possible values:
2149#
2150# * Any valid virtual interface name, such as 'eth0'
2151# (string value)
2152# This option is deprecated for removal since 15.0.0.
2153# Its value may be silently ignored in the future.
2154# Reason:
2155# nova-network is deprecated, as are any related configuration
2156# options. While
2157# this option has an effect when using neutron, it incorrectly
2158# overrides the value
2159# provided by neutron and should therefore not be used.
2160#vlan_interface = <None>
2161
2162# DEPRECATED:
2163# This option represents the number of networks to create if not
2164# explicitly
2165# specified when the network is created. The only time this is used is
2166# if a CIDR
2167# is specified, but an explicit network_size is not. In that case, the
2168# subnets
2169# are created by dividing the IP address space of the CIDR by
2170# num_networks. The
2171# resulting subnet sizes cannot be larger than the configuration
2172# option
2173# `network_size`; in that event, they are reduced to `network_size`,
2174# and a
2175# warning is logged.
2176#
2177# Please note that this option is only used when using nova-network
2178# instead of
2179# Neutron in your deployment.
2180#
2181# Possible values:
2182#
2183# * Any positive integer is technically valid, although there are
2184# practical
2185# limits based upon available IP address space and virtual
2186# interfaces.
2187#
2188# Related options:
2189#
2190# * ``use_neutron``
2191# * ``network_size``
2192# (integer value)
2193# Minimum value: 1
2194# This option is deprecated for removal since 15.0.0.
2195# Its value may be silently ignored in the future.
2196# Reason:
2197# nova-network is deprecated, as are any related configuration
2198# options.
2199#num_networks = 1
2200
2201# DEPRECATED:
2202# This option is no longer used since the /os-cloudpipe API was
2203# removed in the
2204# 16.0.0 Pike release. This is the public IP address for the cloudpipe
2205# VPN
2206# servers. It defaults to the IP address of the host.
2207#
2208# Please note that this option is only used when using nova-network
2209# instead of
2210# Neutron in your deployment. It also will be ignored if the
2211# configuration option
2212# for `network_manager` is not set to the default of
2213# 'nova.network.manager.VlanManager'.
2214#
2215# Possible values:
2216#
2217# * Any valid IP address. The default is ``$my_ip``, the IP address of
2218# the host.
2219#
2220# Related options:
2221#
2222# * ``network_manager``
2223# * ``use_neutron``
2224# * ``vpn_start``
2225# (string value)
2226# This option is deprecated for removal since 15.0.0.
2227# Its value may be silently ignored in the future.
2228# Reason:
2229# nova-network is deprecated, as are any related configuration
2230# options.
2231#vpn_ip = $my_ip
2232
2233# DEPRECATED:
2234# This is the port number to use as the first VPN port for private
2235# networks.
2236#
2237# Please note that this option is only used when using nova-network
2238# instead of
2239# Neutron in your deployment. It also will be ignored if the
2240# configuration option
2241# for `network_manager` is not set to the default of
2242# 'nova.network.manager.VlanManager', or if you specify a value for the
2243# 'vpn_start'
2244# parameter when creating a network.
2245#
2246# Possible values:
2247#
2248# * Any integer representing a valid port number. The default is 1000.
2249#
2250# Related options:
2251#
2252# * ``use_neutron``
2253# * ``vpn_ip``
2254# * ``network_manager``
2255# (port value)
2256# Minimum value: 0
2257# Maximum value: 65535
2258# This option is deprecated for removal since 15.0.0.
2259# Its value may be silently ignored in the future.
2260# Reason:
2261# nova-network is deprecated, as are any related configuration
2262# options.
2263#vpn_start = 1000
2264
2265# DEPRECATED:
2266# This option determines the number of addresses in each private
2267# subnet.
2268#
2269# Please note that this option is only used when using nova-network
2270# instead of
2271# Neutron in your deployment.
2272#
2273# Possible values:
2274#
2275# * Any positive integer that is less than or equal to the available
2276# network
2277# size. Note that if you are creating multiple networks, they must
2278# all fit in
2279# the available IP address space. The default is 256.
2280#
2281# Related options:
2282#
2283# * ``use_neutron``
2284# * ``num_networks``
2285# (integer value)
2286# Minimum value: 1
2287# This option is deprecated for removal since 15.0.0.
2288# Its value may be silently ignored in the future.
2289# Reason:
2290# nova-network is deprecated, as are any related configuration
2291# options.
2292#network_size = 256
2293
2294# DEPRECATED:
2295# This option determines the fixed IPv6 address block when creating a
2296# network.
2297#
2298# Please note that this option is only used when using nova-network
2299# instead of
2300# Neutron in your deployment.
2301#
2302# Possible values:
2303#
2304# * Any valid IPv6 CIDR
2305#
2306# Related options:
2307#
2308# * ``use_neutron``
2309# (string value)
2310# This option is deprecated for removal since 15.0.0.
2311# Its value may be silently ignored in the future.
2312# Reason:
2313# nova-network is deprecated, as are any related configuration
2314# options.
2315#fixed_range_v6 = fd00::/48
2316
2317# DEPRECATED:
2318# This is the default IPv4 gateway. It is used only in the testing
2319# suite.
2320#
2321# Please note that this option is only used when using nova-network
2322# instead of
2323# Neutron in your deployment.
2324#
2325# Possible values:
2326#
2327# * Any valid IP address.
2328#
2329# Related options:
2330#
2331# * ``use_neutron``
2332# * ``gateway_v6``
2333# (string value)
2334# This option is deprecated for removal since 15.0.0.
2335# Its value may be silently ignored in the future.
2336# Reason:
2337# nova-network is deprecated, as are any related configuration
2338# options.
2339#gateway = <None>
2340
2341# DEPRECATED:
2342# This is the default IPv6 gateway. It is used only in the testing
2343# suite.
2344#
2345# Please note that this option is only used when using nova-network
2346# instead of
2347# Neutron in your deployment.
2348#
2349# Possible values:
2350#
2351# * Any valid IP address.
2352#
2353# Related options:
2354#
2355# * ``use_neutron``
2356# * ``gateway``
2357# (string value)
2358# This option is deprecated for removal since 15.0.0.
2359# Its value may be silently ignored in the future.
2360# Reason:
2361# nova-network is deprecated, as are any related configuration
2362# options.
2363#gateway_v6 = <None>
2364
2365# DEPRECATED:
2366# This option represents the number of IP addresses to reserve at the
2367# top of the
2368# address range for VPN clients. It also will be ignored if the
2369# configuration
2370# option for `network_manager` is not set to the default of
2371# 'nova.network.manager.VlanManager'.
2372#
2373# Possible values:
2374#
2375# * Any integer, 0 or greater.
2376#
2377# Related options:
2378#
2379# * ``use_neutron``
2380# * ``network_manager``
2381# (integer value)
2382# Minimum value: 0
2383# This option is deprecated for removal since 15.0.0.
2384# Its value may be silently ignored in the future.
2385# Reason:
2386# nova-network is deprecated, as are any related configuration
2387# options.
2388#cnt_vpn_clients = 0
2389
2390# DEPRECATED:
2391# This is the number of seconds to wait before disassociating a
2392# deallocated fixed
2393# IP address. This is only used with the nova-network service, and has
2394# no effect
2395# when using neutron for networking.
2396#
2397# Possible values:
2398#
2399# * Any integer, zero or greater.
2400#
2401# Related options:
2402#
2403# * ``use_neutron``
2404# (integer value)
2405# Minimum value: 0
2406# This option is deprecated for removal since 15.0.0.
2407# Its value may be silently ignored in the future.
2408# Reason:
2409# nova-network is deprecated, as are any related configuration
2410# options.
2411#fixed_ip_disassociate_timeout = 600
2412
2413# DEPRECATED:
2414# This option determines how many times nova-network will attempt to
2415# create a
2416# unique MAC address before giving up and raising a
2417# `VirtualInterfaceMacAddressException` error.
2418#
2419# Possible values:
2420#
2421# * Any positive integer. The default is 5.
2422#
2423# Related options:
2424#
2425# * ``use_neutron``
2426# (integer value)
2427# Minimum value: 1
2428# This option is deprecated for removal since 15.0.0.
2429# Its value may be silently ignored in the future.
2430# Reason:
2431# nova-network is deprecated, as are any related configuration
2432# options.
2433#create_unique_mac_address_attempts = 5
2434
2435# DEPRECATED:
2436# Determines whether unused gateway devices, both VLAN and bridge, are
2437# deleted if
2438# the network is in nova-network VLAN mode and is multi-hosted.
2439#
2440# Related options:
2441#
2442# * ``use_neutron``
2443# * ``vpn_ip``
2444# * ``fake_network``
2445# (boolean value)
2446# This option is deprecated for removal since 15.0.0.
2447# Its value may be silently ignored in the future.
2448# Reason:
2449# nova-network is deprecated, as are any related configuration
2450# options.
2451#teardown_unused_network_gateway = false
2452
2453# DEPRECATED:
2454# When this option is True, a call is made to release the DHCP lease for the
2455# instance
2456# when that instance is terminated.
2457#
2458# Related options:
2459#
2460# * ``use_neutron``
2461# (boolean value)
2462# This option is deprecated for removal since 15.0.0.
2463# Its value may be silently ignored in the future.
2464# Reason:
2465# nova-network is deprecated, as are any related configuration
2466# options.
2467force_dhcp_release = {{ controller.get('force_dhcp_release', 'true') }}
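# The value above is rendered from the controller context data with a
# fallback of 'true'. As an illustrative sketch only (the nova:controller
# pillar path is an assumption about the formula layout, not authoritative),
# a pillar key nova:controller:force_dhcp_release: false would render as:
# force_dhcp_release = false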
2468
2469# DEPRECATED:
2470# When this option is True, whenever a DNS entry must be updated, a
2471# fanout cast
2472# message is sent to all network hosts to update their DNS entries in
2473# multi-host
2474# mode.
2475#
2476# Related options:
2477#
2478# * ``use_neutron``
2479# (boolean value)
2480# This option is deprecated for removal since 15.0.0.
2481# Its value may be silently ignored in the future.
2482# Reason:
2483# nova-network is deprecated, as are any related configuration
2484# options.
2485#update_dns_entries = false
2486
2487# DEPRECATED:
2488# This option determines the time, in seconds, to wait between
2489# refreshing DNS
2490# entries for the network.
2491#
2492# Possible values:
2493#
2494# * A positive integer
2495# * -1 to disable updates
2496#
2497# Related options:
2498#
2499# * ``use_neutron``
2500# (integer value)
2501# Minimum value: -1
2502# This option is deprecated for removal since 15.0.0.
2503# Its value may be silently ignored in the future.
2504# Reason:
2505# nova-network is deprecated, as are any related configuration
2506# options.
2507#dns_update_periodic_interval = -1
2508
2509# DEPRECATED:
2510# This option allows you to specify the domain for the DHCP server.
2511#
2512# Possible values:
2513#
2514# * Any string that is a valid domain name.
2515#
2516# Related options:
2517#
2518# * ``use_neutron``
2519# (string value)
2520# This option is deprecated for removal since 15.0.0.
2521# Its value may be silently ignored in the future.
2522# Reason:
2523# nova-network is deprecated, as are any related configuration
2524# options.
2525#dhcp_domain = novalocal
2526dhcp_domain={{ controller.get('dhcp_domain', 'novalocal') }}
2527
2528# DEPRECATED:
2529# This option allows you to specify the L3 management library to be
2530# used.
2531#
2532# Possible values:
2533#
2534# * Any dot-separated string that represents the import path to an L3
2535# networking
2536# library.
2537#
2538# Related options:
2539#
2540# * ``use_neutron``
2541# (string value)
2542# This option is deprecated for removal since 15.0.0.
2543# Its value may be silently ignored in the future.
2544# Reason:
2545# nova-network is deprecated, as are any related configuration
2546# options.
2547#l3_lib = nova.network.l3.LinuxNetL3
2548
2549# DEPRECATED:
2550# THIS VALUE SHOULD BE SET WHEN CREATING THE NETWORK.
2551#
2552# If True in multi_host mode, all compute hosts share the same dhcp
2553# address. The
2554# same IP address used for DHCP will be added on each nova-network
2555# node which is
2556# only visible to the VMs on the same host.
2557#
2558# The use of this configuration has been deprecated and may be removed
2559# in any
2560# release after Mitaka. It is recommended that instead of relying on
2561# this option,
2562# an explicit value should be passed to 'create_networks()' as a
2563# keyword argument
2564# with the name 'share_address'.
2565# (boolean value)
2566# This option is deprecated for removal since 2014.2.
2567# Its value may be silently ignored in the future.
2568#share_dhcp_address = false
2569
2570# DEPRECATED:
2571# URL for LDAP server which will store DNS entries
2572#
2573# Possible values:
2574#
2575# * A valid LDAP URL representing the server
2576# (uri value)
2577# This option is deprecated for removal since 16.0.0.
2578# Its value may be silently ignored in the future.
2579# Reason:
2580# nova-network is deprecated, as are any related configuration
2581# options.
2582#ldap_dns_url = ldap://ldap.example.com:389
2583
2584# DEPRECATED: Bind user for LDAP server (string value)
2585# This option is deprecated for removal since 16.0.0.
2586# Its value may be silently ignored in the future.
2587# Reason:
2588# nova-network is deprecated, as are any related configuration
2589# options.
2590#ldap_dns_user = uid=admin,ou=people,dc=example,dc=org
2591
2592# DEPRECATED: Bind user's password for LDAP server (string value)
2593# This option is deprecated for removal since 16.0.0.
2594# Its value may be silently ignored in the future.
2595# Reason:
2596# nova-network is deprecated, as are any related configuration
2597# options.
2598#ldap_dns_password = password
2599
2600# DEPRECATED:
2601# Hostmaster for LDAP DNS driver Start of Authority
2602#
2603# Possible values:
2604#
2605# * Any valid string representing LDAP DNS hostmaster.
2606# (string value)
2607# This option is deprecated for removal since 16.0.0.
2608# Its value may be silently ignored in the future.
2609# Reason:
2610# nova-network is deprecated, as are any related configuration
2611# options.
2612#ldap_dns_soa_hostmaster = hostmaster@example.org
2613
2614# DEPRECATED:
2615# DNS Servers for LDAP DNS driver
2616#
2617# Possible values:
2618#
2619# * A valid URL representing a DNS server
2620# (multi valued)
2621# This option is deprecated for removal since 16.0.0.
2622# Its value may be silently ignored in the future.
2623# Reason:
2624# nova-network is deprecated, as are any related configuration
2625# options.
2626#ldap_dns_servers = dns.example.org
2627
2628# DEPRECATED:
2629# Base distinguished name for the LDAP search query
2630#
2631# This option helps to decide where to look up the host in LDAP.
2632# (string value)
2633# This option is deprecated for removal since 16.0.0.
2634# Its value may be silently ignored in the future.
2635# Reason:
2636# nova-network is deprecated, as are any related configuration
2637# options.
2638#ldap_dns_base_dn = ou=hosts,dc=example,dc=org
2639
2640# DEPRECATED:
2641# Refresh interval (in seconds) for LDAP DNS driver Start of Authority
2642#
2643# The time interval a secondary/slave DNS server waits before
2644# requesting the
2645# primary DNS server's current SOA record. If the records are
2646# different, the
2647# secondary DNS server will request a zone transfer from the primary.
2648#
2649# NOTE: Lower values would cause more traffic.
2650# (integer value)
2651# This option is deprecated for removal since 16.0.0.
2652# Its value may be silently ignored in the future.
2653# Reason:
2654# nova-network is deprecated, as are any related configuration
2655# options.
2656#ldap_dns_soa_refresh = 1800
2657
2658# DEPRECATED:
2659# Retry interval (in seconds) for LDAP DNS driver Start of Authority
2660#
2661# The time interval a secondary/slave DNS server should wait if an
2662# attempt to transfer the zone failed during the previous refresh
2663# interval.
2664# (integer value)
2665# This option is deprecated for removal since 16.0.0.
2666# Its value may be silently ignored in the future.
2667# Reason:
2668# nova-network is deprecated, as are any related configuration
2669# options.
2670#ldap_dns_soa_retry = 3600
2671
2672# DEPRECATED:
2673# Expiry interval (in seconds) for LDAP DNS driver Start of Authority
2674#
2675# The time interval a secondary/slave DNS server holds the information
2676# before it is no longer considered authoritative.
2677# (integer value)
2678# This option is deprecated for removal since 16.0.0.
2679# Its value may be silently ignored in the future.
2680# Reason:
2681# nova-network is deprecated, as are any related configuration
2682# options.
2683#ldap_dns_soa_expiry = 86400
2684
2685# DEPRECATED:
2686# Minimum interval (in seconds) for LDAP DNS driver Start of Authority
2687#
2688# This is the minimum time-to-live that applies to all resource
2689# records in the zone file. It tells other servers how long they
2690# should keep the data in cache.
2691# (integer value)
2692# This option is deprecated for removal since 16.0.0.
2693# Its value may be silently ignored in the future.
2694# Reason:
2695# nova-network is deprecated, as are any related configuration
2696# options.
2697#ldap_dns_soa_minimum = 7200
2698
2699# DEPRECATED:
2700# Default value for multi_host in networks.
2701#
2702# nova-network service can operate in a multi-host or single-host
2703# mode.
2704# In multi-host mode each compute node runs a copy of nova-network and
2705# the
2706# instances on that compute node use the compute node as a gateway to
2707# the
2708# Internet. Whereas in single-host mode, a central server runs the
2709# nova-network
2710# service. All compute nodes forward traffic from the instances to the
2711# cloud controller which then forwards traffic to the Internet.
2712#
2713# If this option is set to true, some RPC network calls will be sent
2714# directly
2715# to the host.
2716#
2717# Note that this option is only used when using nova-network instead
2718# of
2719# Neutron in your deployment.
2720#
2721# Related options:
2722#
2723# * ``use_neutron``
2724# (boolean value)
2725# This option is deprecated for removal since 15.0.0.
2726# Its value may be silently ignored in the future.
2727# Reason:
2728# nova-network is deprecated, as are any related configuration
2729# options.
2730#multi_host = false
2731
2732# DEPRECATED:
2733# Driver to use for network creation.
2734#
2735# Network driver initializes (creates bridges and so on) only when the
2736# first VM lands on a host node. All network managers configure the
2737# network using network drivers. The driver is not tied to any
2738# particular
2739# network manager.
2740#
2741# The default Linux driver implements vlans, bridges, and iptables
2742# rules
2743# using linux utilities.
2744#
2745# Note that this option is only used when using nova-network instead
2746# of Neutron in your deployment.
2747#
2748# Related options:
2749#
2750# * ``use_neutron``
2751# (string value)
2752# This option is deprecated for removal since 15.0.0.
2753# Its value may be silently ignored in the future.
2754# Reason:
2755# nova-network is deprecated, as are any related configuration
2756# options.
2757#network_driver = nova.network.linux_net
2758
2759# DEPRECATED:
2760# Firewall driver to use with ``nova-network`` service.
2761#
2762# This option only applies when using the ``nova-network`` service.
2763# When using
2764# another networking service, such as Neutron, this should be set
2765# to the
2766# ``nova.virt.firewall.NoopFirewallDriver``.
2767#
2768# Possible values:
2769#
2770# * ``nova.virt.firewall.IptablesFirewallDriver``
2771# * ``nova.virt.firewall.NoopFirewallDriver``
2772# * ``nova.virt.libvirt.firewall.IptablesFirewallDriver``
2773# * [...]
2774#
2775# Related options:
2776#
2777# * ``use_neutron``: This must be set to ``False`` to enable ``nova-
2778# network``
2779# networking
2780# (string value)
2781# This option is deprecated for removal since 16.0.0.
2782# Its value may be silently ignored in the future.
2783# Reason:
2784# nova-network is deprecated, as are any related configuration
2785# options.
2786firewall_driver = nova.virt.firewall.NoopFirewallDriver
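# NOTE: the no-op driver is used here because, as described above, this
# option only applies to the nova-network service; with use_neutron enabled
# earlier in this file, security filtering is delegated to Neutron and nova
# itself should not program instance firewall rules.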
2787
2788# DEPRECATED:
2789# Determine whether to allow network traffic from same network.
2790#
2791# When set to true, hosts on the same subnet are not filtered and are
2792# allowed
2793# to pass all types of traffic between them. On a flat network, this
2794# allows
2795# all instances from all projects unfiltered communication. With VLAN
2796# networking, this allows access between instances within the same
2797# project.
2798#
2799# This option only applies when using the ``nova-network`` service.
2800# When using
2801# another networking service, such as Neutron, security groups or
2802# other
2803# approaches should be used.
2804#
2805# Possible values:
2806#
2807# * True: Network traffic should be allowed to pass between all instances
2808# on the
2809# same network, regardless of their tenant and security policies
2810# * False: Network traffic should not be allowed to pass between
2811# instances unless
2812# it is unblocked in a security group
2813#
2814# Related options:
2815#
2816# * ``use_neutron``: This must be set to ``False`` to enable ``nova-
2817# network``
2818# networking
2819# * ``firewall_driver``: This must be set to
2820# ``nova.virt.libvirt.firewall.IptablesFirewallDriver`` to ensure
2821# the
2822# libvirt firewall driver is enabled.
2823# (boolean value)
2824# This option is deprecated for removal since 16.0.0.
2825# Its value may be silently ignored in the future.
2826# Reason:
2827# nova-network is deprecated, as are any related configuration
2828# options.
2829#allow_same_net_traffic = true
2830
2831# DEPRECATED:
2832# Default pool for floating IPs.
2833#
2834# This option specifies the default floating IP pool for allocating
2835# floating IPs.
2836#
2837# While allocating a floating IP, users can optionally pass in the
2838# name of the
2839# pool they want to allocate from, otherwise it will be pulled from
2840# the
2841# default pool.
2842#
2843# If this option is not set, then 'nova' is used as the default floating
2844# pool.
2845#
2846# Possible values:
2847#
2848# * Any string representing a floating IP pool name
2849# (string value)
2850# This option is deprecated for removal since 16.0.0.
2851# Its value may be silently ignored in the future.
2852# Reason:
2853# This option was used for two purposes: to set the floating IP pool
2854# name for
2855# nova-network and to do the same for neutron. nova-network is
2856# deprecated, as are
2857# any related configuration options. Users of neutron, meanwhile,
2858# should use the
2859# 'default_floating_pool' option in the '[neutron]' group.
2860#default_floating_pool = nova
2861
2862# DEPRECATED:
2863# Autoassigning floating IP to VM
2864#
2865# When set to True, a floating IP is auto-allocated and associated
2866# with the VM upon creation.
2867#
2868# Related options:
2869#
2870# * use_neutron: this option only works with nova-network.
2871# (boolean value)
2872# This option is deprecated for removal since 15.0.0.
2873# Its value may be silently ignored in the future.
2874# Reason:
2875# nova-network is deprecated, as are any related configuration
2876# options.
2877#auto_assign_floating_ip = false
2878
2879# DEPRECATED:
2880# Full class name for the DNS Manager for floating IPs.
2881#
2882# This option specifies the class of the driver that provides
2883# functionality
2884# to manage DNS entries associated with floating IPs.
2885#
2886# When a user adds a DNS entry for a specified domain to a floating
2887# IP,
2888# nova will add a DNS entry using the specified floating DNS driver.
2889# When a floating IP is deallocated, its DNS entry will automatically
2890# be deleted.
2891#
2892# Possible values:
2893#
2894# * Full Python path to the class to be used
2895#
2896# Related options:
2897#
2898# * use_neutron: this option only works with nova-network.
2899# (string value)
2900# This option is deprecated for removal since 15.0.0.
2901# Its value may be silently ignored in the future.
2902# Reason:
2903# nova-network is deprecated, as are any related configuration
2904# options.
2905#floating_ip_dns_manager = nova.network.noop_dns_driver.NoopDNSDriver
2906
2907# DEPRECATED:
2908# Full class name for the DNS Manager for instance IPs.
2909#
2910# This option specifies the class of the driver that provides
2911# functionality
2912# to manage DNS entries for instances.
2913#
2914# On instance creation, nova will add DNS entries for the instance
2915# name and
2916# id, using the specified instance DNS driver and domain. On instance
2917# deletion,
2918# nova will remove the DNS entries.
2919#
2920# Possible values:
2921#
2922# * Full Python path to the class to be used
2923#
2924# Related options:
2925#
2926# * use_neutron: this option only works with nova-network.
2927# (string value)
2928# This option is deprecated for removal since 15.0.0.
2929# Its value may be silently ignored in the future.
2930# Reason:
2931# nova-network is deprecated, as are any related configuration
2932# options.
2933#instance_dns_manager = nova.network.noop_dns_driver.NoopDNSDriver
2934
2935# DEPRECATED:
2936# If specified, Nova checks if the availability_zone of every instance
2937# matches
2938# what the database says the availability_zone should be for the
2939# specified
2940# dns_domain.
2941#
2942# Related options:
2943#
2944# * use_neutron: this option only works with nova-network.
2945# (string value)
2946# This option is deprecated for removal since 15.0.0.
2947# Its value may be silently ignored in the future.
2948# Reason:
2949# nova-network is deprecated, as are any related configuration
2950# options.
2951#instance_dns_domain =
2952
2953# DEPRECATED:
2954# Assign IPv6 and IPv4 addresses when creating instances.
2955#
2956# Related options:
2957#
2958# * use_neutron: this only works with nova-network.
2959# (boolean value)
2960# This option is deprecated for removal since 16.0.0.
2961# Its value may be silently ignored in the future.
2962# Reason:
2963# nova-network is deprecated, as are any related configuration
2964# options.
2965#use_ipv6 = false
2966
2967# DEPRECATED:
2968# Abstracts out IPv6 address generation to pluggable backends.
2969#
2970# nova-network can be put into dual-stack mode, so that it uses
2971# both IPv4 and IPv6 addresses. In dual-stack mode, by default,
2972# instances
2973# acquire IPv6 global unicast addresses with the help of stateless
2974# address
2975# auto-configuration mechanism.
2976#
2977# Related options:
2978#
2979# * use_neutron: this option only works with nova-network.
2980# * use_ipv6: this option only works if ipv6 is enabled for nova-
2981# network.
2982# (string value)
2983# Possible values:
2984# rfc2462 - <No description provided>
2985# account_identifier - <No description provided>
2986# This option is deprecated for removal since 16.0.0.
2987# Its value may be silently ignored in the future.
2988# Reason:
2989# nova-network is deprecated, as are any related configuration
2990# options.
2991#ipv6_backend = rfc2462
2992
2993# DEPRECATED:
2994# This option is used to enable or disable quota checking for tenant
2995# networks.
2996#
2997# Related options:
2998#
2999# * quota_networks
3000# (boolean value)
3001# This option is deprecated for removal since 14.0.0.
3002# Its value may be silently ignored in the future.
3003# Reason:
3004# CRUD operations on tenant networks are only available when using
3005# nova-network
3006# and nova-network is itself deprecated.
3007#enable_network_quota = false
3008
3009# DEPRECATED:
3010# This option controls the number of private networks that can be
3011# created per
3012# project (or per tenant).
3013#
3014# Related options:
3015#
3016# * enable_network_quota
3017# (integer value)
3018# Minimum value: 0
3019# This option is deprecated for removal since 14.0.0.
3020# Its value may be silently ignored in the future.
3021# Reason:
3022# CRUD operations on tenant networks are only available when using
3023# nova-network
3024# and nova-network is itself deprecated.
3025#quota_networks = 3
3026
3027#
3028# Filename that will be used for storing websocket frames received
3029# and sent by a proxy service (like VNC, spice, serial) running on
3030# this host.
3031# If this is not set, no recording will be done.
3032# (string value)
3033#record = <None>
3034
3035# Run as a background process. (boolean value)
3036#daemon = false
3037
3038# Disallow non-encrypted connections. (boolean value)
3039#ssl_only = false
3040{%- if controller.novncproxy.tls.get('enabled', False) %}
3041ssl_only=True
3042cert={{controller.novncproxy.tls.server.cert_file|yaml_squote}}
3043key={{controller.novncproxy.tls.server.key_file|yaml_squote}}
3044{%- endif %}
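# The conditional block above turns on TLS for the noVNC proxy only when the
# corresponding flag is present in the controller data. A minimal
# illustrative pillar shape (the nova:controller prefix and the file paths
# are assumptions; only the keys used by the template are authoritative):
# nova:
#   controller:
#     novncproxy:
#       tls:
#         enabled: true
#         server:
#           cert_file: /etc/nova/ssl/novnc.crt
#           key_file: /etc/nova/ssl/novnc.key
# With such data the template renders ssl_only=True plus the cert and key
# paths; otherwise the block renders nothing.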
3045
3046# Set to True if source host is addressed with IPv6. (boolean value)
3047#source_is_ipv6 = false
3048
3049# Path to SSL certificate file. (string value)
3050#cert = self.pem
3051
3052# SSL key file (if separate from cert). (string value)
3053#key = <None>
3054
3055#
3056# Path to directory with content which will be served by a web server.
3057# (string value)
3058#web = /usr/share/spice-html5
3059
3060#
3061# The directory where the Nova python modules are installed.
3062#
3063# This directory is used to store template files for networking and
3064# remote
3065# console access. It is also the default path for other config options
3066# which
3067# need to persist Nova internal data. It is very unlikely that you
3068# need to
3069# change this option from its default value.
3070#
3071# Possible values:
3072#
3073# * The full path to a directory.
3074#
3075# Related options:
3076#
3077# * ``state_path``
3078# (string value)
3079#pybasedir = /usr/lib/python2.7/dist-packages
3080
3081#
3082# The directory where the Nova binaries are installed.
3083#
3084# This option is only relevant if the networking capabilities from
3085# Nova are
3086# used (see services below). Nova's networking capabilities are
3087# targeted to
3088# be fully replaced by Neutron in the future. It is very unlikely that
3089# you need
3090# to change this option from its default value.
3091#
3092# Possible values:
3093#
3094# * The full path to a directory.
3095# (string value)
3096#bindir = /usr/local/bin
3097
3098#
3099# The top-level directory for maintaining Nova's state.
3100#
3101# This directory is used to store Nova's internal state. It is used by
3102# a
3103# variety of other config options which derive from this. In some
3104# scenarios
3105# (for example migrations) it makes sense to use a storage location
3106# which is
3107# shared between multiple compute hosts (for example via NFS). Unless
3108# the
3109# option ``instances_path`` gets overwritten, this directory can grow
3110# very
3111# large.
3112#
3113# Possible values:
3114#
3115# * The full path to a directory. Defaults to value provided in
3116# ``pybasedir``.
3117# (string value)
3118state_path = /var/lib/nova
3119
3120#
3121# Number of seconds indicating how frequently the state of services on
3122# a
3123# given hypervisor is reported. Nova needs to know this to determine
3124# the
3125# overall health of the deployment.
3126#
3127# Related Options:
3128#
3129# * service_down_time
3130# report_interval should be less than service_down_time. If
3131# service_down_time
3132# is less than report_interval, services will routinely be
3133# considered down,
3134# because they report in too rarely.
3135# (integer value)
3136#report_interval = 10
3137report_interval = {{ controller.get('report_interval', '60') }}
3138
3139#
3140# Maximum time in seconds since last check-in for up service
3141#
3142# Each compute node periodically updates their database status based
3143# on the
3144# specified report interval. If the compute node hasn't updated the
3145# status
3146# for more than service_down_time, then the compute node is considered
3147# down.
3148#
3149# Related Options:
3150#
3151# * report_interval (service_down_time should not be less than
3152# report_interval)
3153# (integer value)
3154service_down_time = {{ controller.service_down_time|default('180') }}
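# With the values rendered by default above (report_interval = 60 and
# service_down_time = 180) the constraint described earlier holds:
# report_interval stays well below service_down_time, so a service has to
# miss several consecutive check-ins before it is considered down.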
3155
3156#
3157# Enable periodic tasks.
3158#
3159# If set to true, this option allows services to periodically run
3160# tasks
3161# on the manager.
3162#
3163# In case of running multiple schedulers or conductors you may want to
3164# run
3165# periodic tasks on only one host - in this case disable this option
3166# for all
3167# hosts but one.
3168# (boolean value)
3169#periodic_enable = true
3170
3171#
3172# Number of seconds to randomly delay when starting the periodic task
3173# scheduler to reduce stampeding.
3174#
3175# When compute workers are restarted in unison across a cluster,
3176# they all end up running the periodic tasks at the same time
3177# causing problems for the external services. To mitigate this
3178# behavior, periodic_fuzzy_delay option allows you to introduce a
3179# random initial delay when starting the periodic task scheduler.
3180#
3181# Possible Values:
3182#
3183# * Any positive integer (in seconds)
3184# * 0 : disable the random delay
3185# (integer value)
3186# Minimum value: 0
3187#periodic_fuzzy_delay = 60
3188
3189# List of APIs to be enabled by default. (list value)
3190enabled_apis = osapi_compute,metadata
3191
3192#
3193# List of APIs with enabled SSL.
3194#
3195# Nova provides SSL support for the API servers. enabled_ssl_apis
3196# option
3197# allows configuring the SSL support.
3198# (list value)
3199#enabled_ssl_apis =
3200
3201#
3202# IP address on which the OpenStack API will listen.
3203#
3204# The OpenStack API service listens on this IP address for incoming
3205# requests.
3206# (string value)
3207#osapi_compute_listen = 0.0.0.0
3208osapi_compute_listen = {{ controller.bind.private_address }}
3209
3210#
3211# Port on which the OpenStack API will listen.
3212#
3213# The OpenStack API service listens on this port number for incoming
3214# requests.
3215# (port value)
3216# Minimum value: 0
3217# Maximum value: 65535
3218#osapi_compute_listen_port = 8774
3219
3220#
3221# Number of workers for OpenStack API service. The default will be the
3222# number
3223# of CPUs available.
3224#
3225# OpenStack API services can be configured to run as multi-process
3226# (workers).
3227# This overcomes the problem of reduction in throughput when API
3228# request
3229# concurrency increases. OpenStack API service will run in the
3230# specified
3231# number of processes.
3232#
3233# Possible Values:
3234#
3235# * Any positive integer
3236# * None (default value)
3237# (integer value)
3238# Minimum value: 1
3239#osapi_compute_workers = <None>
3240osapi_compute_workers = {{ controller.workers }}
3241
3242#
3243# IP address on which the metadata API will listen.
3244#
3245# The metadata API service listens on this IP address for incoming
3246# requests.
3247# (string value)
3248#metadata_listen = 0.0.0.0
3249metadata_listen={{ controller.get('metadata', {}).get('bind', {}).get('address', controller.bind.private_address) }}
3250
3251#
3252# Port on which the metadata API will listen.
3253#
3254# The metadata API service listens on this port number for incoming
3255# requests.
3256# (port value)
3257# Minimum value: 0
3258# Maximum value: 65535
3259#metadata_listen_port = 8775
3260{%- if controller.get('metadata', {}).get('bind', {}).port is defined %}
3261metadata_listen_port={{ controller.metadata.bind.port }}
3262{%- else %}
3263#metadata_listen_port=8775
3264{%- endif %}
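# Illustrative example only (the nova:controller pillar prefix is an
# assumption; the keys match those read by the template above): providing
# nova:controller:metadata:bind:address: 10.0.0.10 and
# nova:controller:metadata:bind:port: 18775 would render:
# metadata_listen=10.0.0.10
# metadata_listen_port=18775
# Without metadata bind data, the listen address falls back to
# controller.bind.private_address and the port line stays commented out.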
3265
3266#
3267# Number of workers for metadata service. If not specified the number
3268# of
3269# available CPUs will be used.
3270#
3271# The metadata service can be configured to run as multi-process
3272# (workers).
3273# This overcomes the problem of reduction in throughput when API
3274# request
3275# concurrency increases. The metadata service will run in the
3276# specified
3277# number of processes.
3278#
3279# Possible Values:
3280#
3281# * Any positive integer
3282# * None (default value)
3283# (integer value)
3284# Minimum value: 1
3285#metadata_workers = <None>
3286metadata_workers = {{ controller.workers }}
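# Both osapi_compute_workers above and metadata_workers here are rendered
# from the same controller.workers value. As an illustrative sketch (pillar
# path assumed), nova:controller:workers: 8 renders both options as 8;
# matching the host CPU count mirrors the upstream default described above.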
3287
3288# Full class name for the Manager for network (string value)
3289# Possible values:
3290# nova.network.manager.FlatManager - <No description provided>
3291# nova.network.manager.FlatDHCPManager - <No description provided>
3292# nova.network.manager.VlanManager - <No description provided>
3293#network_manager = nova.network.manager.VlanManager
3294
3295#
3296# This option specifies the driver to be used for the servicegroup
3297# service.
3298#
3299# ServiceGroup API in nova enables checking status of a compute node.
3300# When a
3301# compute worker running the nova-compute daemon starts, it calls the
3302# join API
3303# to join the compute group. Services like nova scheduler can query
3304# the
3305# ServiceGroup API to check if a node is alive. Internally, the
3306# ServiceGroup
3307# client driver automatically updates the compute worker status. There
3308# are
3309# multiple backend implementations for this service: Database
3310# ServiceGroup driver
3311# and Memcache ServiceGroup driver.
3312#
3313# Possible Values:
3314#
3315# * db : Database ServiceGroup driver
3316# * mc : Memcache ServiceGroup driver
3317#
3318# Related Options:
3319#
3320# * service_down_time (maximum time since last check-in for up
3321# service)
3322# (string value)
3323# Possible values:
3324# db - <No description provided>
3325# mc - <No description provided>
3326#servicegroup_driver = db
3327
3328#
3329# From oslo.service.periodic_task
3330#
3331
3332# Some periodic tasks can be run in a separate process. Should we run
3333# them here? (boolean value)
3334#run_external_periodic_tasks = true
3335
3336#
3337# From oslo.service.service
3338#
3339
3340# Enable eventlet backdoor. Acceptable values are 0, <port>, and
3341# <start>:<end>, where 0 results in listening on a random tcp port
3342# number; <port> results in listening on the specified port number
3343# (and not enabling backdoor if that port is in use); and
3344# <start>:<end> results in listening on the smallest unused port
3345# number within the specified range of port numbers. The chosen port
3346# is displayed in the service's log file. (string value)
3347#backdoor_port = <None>
3348
3349# Enable eventlet backdoor, using the provided path as a unix socket
3350# that can receive connections. This option is mutually exclusive with
3351# 'backdoor_port' in that only one should be provided. If both are
3352# provided then the existence of this option overrides the usage of
3353# that option. (string value)
3354#backdoor_socket = <None>
3355
3356# Enables or disables logging values of all registered options when
3357# starting a service (at DEBUG level). (boolean value)
3358#log_options = true
3359
3360# Specify a timeout after which a gracefully shutdown server will
3361# exit. Zero value means endless wait. (integer value)
3362#graceful_shutdown_timeout = 60
3363
3364{%- if controller.logging is defined %}
3365{%- set _data = controller.logging %}
3366{%- include "oslo_templates/files/queens/oslo/_log.conf" %}
3367{%- endif %}
3368
3369{%- set _data = controller.message_queue %}
3370{%- include "oslo_templates/files/queens/oslo/messaging/_default.conf" %}
3371
3372[api]
3373#
3374# Options under this group are used to define Nova API.
3375
3376#
3377# From nova.conf
3378#
3379
3380#
3381# This determines the strategy to use for authentication: keystone or
3382# noauth2.
3383# 'noauth2' is designed for testing only, as it does no actual
3384# credential
3385# checking. 'noauth2' provides administrative credentials only if
3386# 'admin' is
3387# specified as the username.
3388# (string value)
3389# Possible values:
3390# keystone - <No description provided>
3391# noauth2 - <No description provided>
3392auth_strategy = keystone
3393
3394#
3395# When True, the 'X-Forwarded-For' header is treated as the canonical
3396# remote
3397# address. When False (the default), the 'remote_address' header is
3398# used.
3399#
3400# You should only enable this if you have an HTML sanitizing proxy.
3401# (boolean value)
3402#use_forwarded_for = false
3403
3404#
3405# When gathering the existing metadata for a config drive, the
3406# EC2-style
3407# metadata is returned for all versions that don't appear in this
3408# option.
3409# As of the Liberty release, the available versions are:
3410#
3411# * 1.0
3412# * 2007-01-19
3413# * 2007-03-01
3414# * 2007-08-29
3415# * 2007-10-10
3416# * 2007-12-15
3417# * 2008-02-01
3418# * 2008-09-01
3419# * 2009-04-04
3420#
3421# The option is in the format of a single string, with each version
3422# separated
3423# by a space.
3424#
3425# Possible values:
3426#
3427# * Any string that represents zero or more versions, separated by
3428# spaces.
3429# (string value)
3430#config_drive_skip_versions = 1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01
3431
3432#
3433# A list of vendordata providers.
3434#
3435# vendordata providers are how deployers can provide metadata via
3436# configdrive
3437# and metadata that is specific to their deployment. There are
3438# currently two
3439# supported providers: StaticJSON and DynamicJSON.
3440#
3441# StaticJSON reads a JSON file configured by the flag
3442# vendordata_jsonfile_path
3443# and places the JSON from that file into vendor_data.json and
3444# vendor_data2.json.
3445#
3446# DynamicJSON is configured via the vendordata_dynamic_targets flag,
3447# which is
3448# documented separately. For each of the endpoints specified in that
3449# flag, a
3450# section is added to the vendor_data2.json.
3451#
3452# For more information on the requirements for implementing a
3453# vendordata
3454# dynamic endpoint, please see the vendordata.rst file in the nova
3455# developer
3456# reference.
3457#
3458# Possible values:
3459#
3460# * A list of vendordata providers, with StaticJSON and DynamicJSON
3461# being
3462# current options.
3463#
3464# Related options:
3465#
3466# * vendordata_dynamic_targets
3467# * vendordata_dynamic_ssl_certfile
3468# * vendordata_dynamic_connect_timeout
3469# * vendordata_dynamic_read_timeout
3470# * vendordata_dynamic_failure_fatal
3471# (list value)
3472#vendordata_providers = StaticJSON
3473
3474#
3475# A list of targets for the dynamic vendordata provider. These targets
3476# are of
3477# the form <name>@<url>.
3478#
3479# The dynamic vendordata provider collects metadata by contacting
3480# external REST
3481# services and querying them for information about the instance. This
3482# behaviour
3483# is documented in the vendordata.rst file in the nova developer
3484# reference.
3485# (list value)
3486#vendordata_dynamic_targets =
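#
# As an illustration only (hypothetical endpoint name and URL, not a
# default), a single dynamic vendordata target in the <name>@<url> format
# described above could look like:
#
#vendordata_dynamic_targets = testing@http://127.0.0.1:9312/vendordata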
3487
3488#
3489# Path to an optional certificate file or CA bundle to verify dynamic
3490# vendordata REST services ssl certificates against.
3491#
3492# Possible values:
3493#
3494# * An empty string, or a path to a valid certificate file
3495#
3496# Related options:
3497#
3498# * vendordata_providers
3499# * vendordata_dynamic_targets
3500# * vendordata_dynamic_connect_timeout
3501# * vendordata_dynamic_read_timeout
3502# * vendordata_dynamic_failure_fatal
3503# (string value)
3504#vendordata_dynamic_ssl_certfile =
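#
# As an illustration only (hypothetical path, not a default), a CA bundle
# used to verify the dynamic vendordata endpoints could be configured as:
#
#vendordata_dynamic_ssl_certfile = /etc/nova/ssl/vendordata-ca.pem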
3505
3506#
3507# Maximum wait time for an external REST service to connect.
3508#
3509# Possible values:
3510#
3511# * Any integer with a value greater than three (the TCP packet
3512# retransmission
3513# timeout). Note that instance start may be blocked during this wait
3514# time,
3515# so this value should be kept small.
3516#
3517# Related options:
3518#
3519# * vendordata_providers
3520# * vendordata_dynamic_targets
3521# * vendordata_dynamic_ssl_certfile
3522# * vendordata_dynamic_read_timeout
3523# * vendordata_dynamic_failure_fatal
3524# (integer value)
3525# Minimum value: 3
3526#vendordata_dynamic_connect_timeout = 5
3527
3528#
3529# Maximum wait time for an external REST service to return data once
3530# connected.
3531#
3532# Possible values:
3533#
3534# * Any integer. Note that instance start is blocked during this wait
3535# time,
3536# so this value should be kept small.
3537#
3538# Related options:
3539#
3540# * vendordata_providers
3541# * vendordata_dynamic_targets
3542# * vendordata_dynamic_ssl_certfile
3543# * vendordata_dynamic_connect_timeout
3544# * vendordata_dynamic_failure_fatal
3545# (integer value)
3546# Minimum value: 0
3547#vendordata_dynamic_read_timeout = 5
3548
3549#
3550# Should failures to fetch dynamic vendordata be fatal to instance
3551# boot?
3552#
3553# Related options:
3554#
3555# * vendordata_providers
3556# * vendordata_dynamic_targets
3557# * vendordata_dynamic_ssl_certfile
3558# * vendordata_dynamic_connect_timeout
3559# * vendordata_dynamic_read_timeout
3560# (boolean value)
3561#vendordata_dynamic_failure_fatal = false
3562
3563#
3564# This option is the time (in seconds) to cache metadata. When set to
3565# 0,
3566# metadata caching is disabled entirely; this is generally not
3567# recommended for
3568# performance reasons. Increasing this setting should improve response
3569# times
3570# of the metadata API when under heavy load. Higher values may
3571# increase memory
3572# usage, and result in longer times for host metadata changes to take
3573# effect.
3574# (integer value)
3575# Minimum value: 0
3576#metadata_cache_expiration = 15
3577
3578#
3579# Cloud providers may store custom data in vendor data file that will
3580# then be
3581# available to the instances via the metadata service, and to the
3582# rendering of
3583# config-drive. The default class for this, JsonFileVendorData, loads
3584# this
3585# information from a JSON file, whose path is configured by this
3586# option. If
3587# there is no path set by this option, the class returns an empty
3588# dictionary.
3589#
3590# Possible values:
3591#
3592# * Any string representing the path to the data file, or an empty
3593# string
3594# (default).
3595# (string value)
3596#vendordata_jsonfile_path = <None>
3597
3598#
3599# As a query can potentially return many thousands of items, you can
3600# limit the
3601# maximum number of items in a single response by setting this option.
3602# (integer value)
3603# Minimum value: 0
3604# Deprecated group/name - [DEFAULT]/osapi_max_limit
3605#max_limit = 1000
3606max_limit={{ controller.osapi_max_limit|default('1000') }}
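# For example, if the controller pillar sets ``osapi_max_limit: 500``
# (illustrative value), the line above renders as ``max_limit=500``;
# otherwise the default of 1000 is used.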
3607
3608#
3609# This string is prepended to the normal URL that is returned in links
3610# to the
3611# OpenStack Compute API. If it is empty (the default), the URLs are
3612# returned
3613# unchanged.
3614#
3615# Possible values:
3616#
3617# * Any string, including an empty string (the default).
3618# (string value)
3619# Deprecated group/name - [DEFAULT]/osapi_compute_link_prefix
3620#compute_link_prefix = <None>
3621
3622#
3623# This string is prepended to the normal URL that is returned in links
3624# to
3625# Glance resources. If it is empty (the default), the URLs are
3626# returned
3627# unchanged.
3628#
3629# Possible values:
3630#
3631# * Any string, including an empty string (the default).
3632# (string value)
3633# Deprecated group/name - [DEFAULT]/osapi_glance_link_prefix
3634#glance_link_prefix = <None>
3635
3636# DEPRECATED:
3637# Operators can turn off the ability for a user to take snapshots of
3638# their
3639# instances by setting this option to False. When disabled, any
3640# attempt to
3641# take a snapshot will result in a HTTP 400 response ("Bad Request").
3642# (boolean value)
3643# This option is deprecated for removal since 16.0.0.
3644# Its value may be silently ignored in the future.
3645# Reason: This option disables the createImage server action API in a
3646# non-discoverable way and is thus a barrier to interoperability.
3647# Also, it is not used for other APIs that create snapshots like
3648# shelve or createBackup. Disabling snapshots should be done via
3649# policy if so desired.
3650#allow_instance_snapshots = true
3651
3652# DEPRECATED:
3653# This option is a list of all instance states for which network
3654# address
3655# information should not be returned from the API.
3656#
3657# Possible values:
3658#
3659# A list of strings, where each string is a valid VM state, as
3660# defined in
3661# nova/compute/vm_states.py. As of the Newton release, they are:
3662#
3663# * "active"
3664# * "building"
3665# * "paused"
3666# * "suspended"
3667# * "stopped"
3668# * "rescued"
3669# * "resized"
3670# * "soft-delete"
3671# * "deleted"
3672# * "error"
3673# * "shelved"
3674# * "shelved_offloaded"
3675# (list value)
3676# Deprecated group/name - [DEFAULT]/osapi_hide_server_address_states
3677# This option is deprecated for removal since 17.0.0.
3678# Its value may be silently ignored in the future.
3679# Reason: This option hides the server address in the server representation
3680# for the configured server states, which makes the behaviour of the GET
3681# server API depend on this config option. As a result, users would not
3682# be able to discover the API behavior on different clouds, which leads
3683# to interoperability issues.
3684#hide_server_address_states = building
3685
3686# The full path to the fping binary. (string value)
3687fping_path = /usr/sbin/fping
3688
3689#
3690# When True, the TenantNetworkController will query the Neutron API to
3691# get the
3692# default networks to use.
3693#
3694# Related options:
3695#
3696# * neutron_default_tenant_id
3697# (boolean value)
3698#use_neutron_default_nets = false
3699
3700#
3701# Tenant ID for getting the default network from Neutron API (also
3702# referred in
3703# some places as the 'project ID') to use.
3704#
3705# Related options:
3706#
3707# * use_neutron_default_nets
3708# (string value)
3709#neutron_default_tenant_id = default
3710
3711#
3712# Enables returning of the instance password by the relevant server
3713# API calls
3714# such as create, rebuild, evacuate, or rescue. If the hypervisor does
3715# not
3716# support password injection, then the password returned will not be
3717# correct,
3718# so if your hypervisor does not support password injection, set this
3719# to False.
3720# (boolean value)
3721#enable_instance_password = true
3722
3723
3724[api_database]
3725{%- set _data = {} %}
3726{%- do _data.update(controller.database) %}
3727{%- do _data.update({'name': 'nova_api'}) %}
3728{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': controller.cacert_file}) %}{% endif %}
3729{%- include "oslo_templates/files/queens/oslo/_database.conf" %}
3730
3731{%- if controller.get('barbican', {}).get('enabled', False) %}
3732{%- set _data = controller.identity %}
3733[barbican]
3734{%- include "oslo_templates/files/queens/castellan/_barbican.conf" %}
3735{%- endif %}
3736
3737[cache]
3738
3739#
3740# From nova.conf
3741#
3742{%- if controller.cache is defined %}
3743backend = oslo_cache.memcache_pool
3744enabled = true
3745memcache_servers={%- for member in controller.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
3746{%- endif %}
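# As an illustration, with two cache members defined in the controller
# pillar (hypothetical addresses), the block above would render roughly as:
#
#backend = oslo_cache.memcache_pool
#enabled = true
#memcache_servers=10.0.0.11:11211,10.0.0.12:11211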
3747
3748# Prefix for building the configuration dictionary for the cache
3749# region. This should not need to be changed unless there is another
3750# dogpile.cache region with the same configuration name. (string
3751# value)
3752#config_prefix = cache.oslo
3753
3754# Default TTL, in seconds, for any cached item in the dogpile.cache
3755# region. This applies to any cached method that doesn't have an
3756# explicit cache expiration time defined for it. (integer value)
3757#expiration_time = 600
3758
3759# Cache backend module. For eventlet-based or environments with
3760# hundreds of threaded servers, Memcache with pooling
3761# (oslo_cache.memcache_pool) is recommended. For environments with
3762# less than 100 threaded servers, Memcached (dogpile.cache.memcached)
3763# or Redis (dogpile.cache.redis) is recommended. Test environments
3764# with a single instance of the server can use the
3765# dogpile.cache.memory backend. (string value)
3766# Possible values:
3767# oslo_cache.memcache_pool - <No description provided>
3768# oslo_cache.dict - <No description provided>
3769# oslo_cache.mongo - <No description provided>
3770# oslo_cache.etcd3gw - <No description provided>
3771# dogpile.cache.memcached - <No description provided>
3772# dogpile.cache.pylibmc - <No description provided>
3773# dogpile.cache.bmemcached - <No description provided>
3774# dogpile.cache.dbm - <No description provided>
3775# dogpile.cache.redis - <No description provided>
3776# dogpile.cache.memory - <No description provided>
3777# dogpile.cache.memory_pickle - <No description provided>
3778# dogpile.cache.null - <No description provided>
3779#backend = dogpile.cache.null
3780
3781# Arguments supplied to the backend module. Specify this option once
3782# per argument to be passed to the dogpile.cache backend. Example
3783# format: "<argname>:<value>". (multi valued)
3784#backend_argument =
3785
3786# Proxy classes to import that will affect the way the dogpile.cache
3787# backend functions. See the dogpile.cache documentation on changing-
3788# backend-behavior. (list value)
3789#proxies =
3790
3791# Global toggle for caching. (boolean value)
3792#enabled = false
3793
3794# Extra debugging from the cache backend (cache keys,
3795# get/set/delete/etc calls). This is only really useful if you need to
3796# see the specific cache-backend get/set/delete calls with the
3797# keys/values. Typically this should be left set to false. (boolean
3798# value)
3799#debug_cache_backend = false
3800
3801# Memcache servers in the format of "host:port".
3802# (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
3803# (list value)
3804#memcache_servers = localhost:11211
3805
3806# Number of seconds memcached server is considered dead before it is
3807# tried again. (dogpile.cache.memcache and oslo_cache.memcache_pool
3808# backends only). (integer value)
3809#memcache_dead_retry = 300
3810
3811# Timeout in seconds for every call to a server.
3812# (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
3813# (integer value)
3814#memcache_socket_timeout = 3
3815
3816# Max total number of open connections to every memcached server.
3817# (oslo_cache.memcache_pool backend only). (integer value)
3818#memcache_pool_maxsize = 10
3819
3820# Number of seconds a connection to memcached is held unused in the
3821# pool before it is closed. (oslo_cache.memcache_pool backend only).
3822# (integer value)
3823#memcache_pool_unused_timeout = 60
3824
3825# Number of seconds that an operation will wait to get a memcache
3826# client connection. (integer value)
3827#memcache_pool_connection_get_timeout = 10
3828
3829
3830[cells]
3831#
3832# DEPRECATED: Cells options allow you to use cells v1 functionality in
3833# an
3834# OpenStack deployment.
3835#
3836# Note that the options in this group are only for cells v1
3837# functionality, which
3838# is considered experimental and not recommended for new deployments.
3839# Cells v1
3840# is being replaced with cells v2, which starting in the 15.0.0 Ocata
3841# release is
3842# required and all Nova deployments will be at least a cells v2 cell
3843# of one.
3844#
3845
3846#
3847# From nova.conf
3848#
3849
3850# DEPRECATED:
3851# Enable cell v1 functionality.
3852#
3853# Note that cells v1 is considered experimental and not recommended
3854# for new
3855# Nova deployments. Cells v1 is being replaced by cells v2; starting
3856# with
3857# the 15.0.0 Ocata release, all Nova deployments are at least a cells
3858# v2 cell
3859# of one. Setting this option, or any other options in the [cells]
3860# group, is
3861# not required for cells v2.
3862#
3863# When this functionality is enabled, it lets you scale an
3864# OpenStack
3865# Compute cloud in a more distributed fashion without having to use
3866# complicated technologies like database and message queue clustering.
3867# Cells are configured as a tree. The top-level cell should have a
3868# host
3869# that runs a nova-api service, but no nova-compute services. Each
3870# child cell should run all of the typical nova-* services in a
3871# regular
3872# Compute cloud except for nova-api. You can think of cells as a
3873# normal
3874# Compute deployment in that each cell has its own database server and
3875# message queue broker.
3876#
3877# Related options:
3878#
3879# * name: A unique cell name must be given when this functionality
3880# is enabled.
3881# * cell_type: Cell type should be defined for all cells.
3882# (boolean value)
3883# This option is deprecated for removal since 16.0.0.
3884# Its value may be silently ignored in the future.
3885# Reason: Cells v1 is being replaced with Cells v2.
3886# enable = false
3887
3888# DEPRECATED:
3889# Name of the current cell.
3890#
3891# This value must be unique for each cell. Name of a cell is used as
3892# its id, leaving this option unset or setting the same name for
3893# two or more cells may cause unexpected behaviour.
3894#
3895# Related options:
3896#
3897# * enabled: This option is meaningful only when cells service
3898# is enabled
3899# (string value)
3900# This option is deprecated for removal since 16.0.0.
3901# Its value may be silently ignored in the future.
3902# Reason: Cells v1 is being replaced with Cells v2.
3903#name = nova
3904
3905# DEPRECATED:
3906# Cell capabilities.
3907#
3908# List of arbitrary key=value pairs defining capabilities of the
3909# current cell to be sent to the parent cells. These capabilities
3910# are intended to be used in cells scheduler filters/weighers.
3911#
3912# Possible values:
3913#
3914# * key=value pairs list for example;
3915# ``hypervisor=xenserver;kvm,os=linux;windows``
3916# (list value)
3917# This option is deprecated for removal since 16.0.0.
3918# Its value may be silently ignored in the future.
3919# Reason: Cells v1 is being replaced with Cells v2.
3920#capabilities = hypervisor=xenserver;kvm,os=linux;windows
3921
3922# DEPRECATED:
3923# Call timeout.
3924#
3925# Cell messaging module waits for response(s) to be put into the
3926# eventlet queue. This option defines the seconds waited for
3927# response from a call to a cell.
3928#
3929# Possible values:
3930#
3931# * An integer, corresponding to the interval time in seconds.
3932# (integer value)
3933# Minimum value: 0
3934# This option is deprecated for removal since 16.0.0.
3935# Its value may be silently ignored in the future.
3936# Reason: Cells v1 is being replaced with Cells v2.
3937#call_timeout = 60
3938
3939# DEPRECATED:
3940# Reserve percentage
3941#
3942# Percentage of cell capacity to hold in reserve, so the minimum
3943# amount of free resource is considered to be:
3944#
3945# min_free = total * (reserve_percent / 100.0)
3946#
3947# This option affects both memory and disk utilization.
3948#
3949# The primary purpose of this reserve is to ensure some space is
3950# available for users who want to resize their instance to be larger.
3951# Note that currently once the capacity expands into this reserve
3952# space this option is ignored.
3953#
3954# Possible values:
3955#
3956# * An integer or float, corresponding to the percentage of cell
3957# capacity to
3958# be held in reserve.
3959# (floating point value)
3960# This option is deprecated for removal since 16.0.0.
3961# Its value may be silently ignored in the future.
3962# Reason: Cells v1 is being replaced with Cells v2.
3963#reserve_percent = 10.0
3964
3965# DEPRECATED:
3966# Type of cell.
3967#
3968# When cells feature is enabled the hosts in the OpenStack Compute
3969# cloud are partitioned into groups. Cells are configured as a tree.
3970# The top-level cell's cell_type must be set to ``api``. All other
3971# cells are defined as a ``compute cell`` by default.
3972#
3973# Related option:
3974#
3975# * quota_driver: Disable quota checking for the child cells.
3976# (nova.quota.NoopQuotaDriver)
3977# (string value)
3978# Possible values:
3979# api - <No description provided>
3980# compute - <No description provided>
3981# This option is deprecated for removal since 16.0.0.
3982# Its value may be silently ignored in the future.
3983# Reason: Cells v1 is being replaced with Cells v2.
3984#cell_type = compute
3985
3986# DEPRECATED:
3987# Mute child interval.
3988#
3989# Number of seconds after which, in the absence of capability and
3990# capacity updates, a child cell is treated as a mute cell. The mute
3991# child cell is then weighted so that skipping it is highly recommended.
3992#
3993# Possible values:
3994#
3995# * An integer, corresponding to the interval time in seconds.
3996# (integer value)
3997# This option is deprecated for removal since 16.0.0.
3998# Its value may be silently ignored in the future.
3999# Reason: Cells v1 is being replaced with Cells v2.
4000#mute_child_interval = 300
4001
4002# DEPRECATED:
4003# Bandwidth update interval.
4004#
4005# Seconds between bandwidth usage cache updates for cells.
4006#
4007# Possible values:
4008#
4009# * An integer, corresponding to the interval time in seconds.
4010# (integer value)
4011# This option is deprecated for removal since 16.0.0.
4012# Its value may be silently ignored in the future.
4013# Reason: Cells v1 is being replaced with Cells v2.
4014#bandwidth_update_interval = 600
4015
4016# DEPRECATED:
4017# Instance update sync database limit.
4018#
4019# Number of instances to pull from the database at one time for
4020# a sync. If there are more instances to update the results will
4021# be paged through.
4022#
4023# Possible values:
4024#
4025# * An integer, corresponding to a number of instances.
4026# (integer value)
4027# This option is deprecated for removal since 16.0.0.
4028# Its value may be silently ignored in the future.
4029# Reason: Cells v1 is being replaced with Cells v2.
4030#instance_update_sync_database_limit = 100
4031
4032# DEPRECATED:
4033# Mute weight multiplier.
4034#
4035# Multiplier used to weigh mute children. Mute children cells are
4036# recommended to be skipped so their weight is multiplied by this
4037# negative value.
4038#
4039# Possible values:
4040#
4041# * Negative numeric number
4042# (floating point value)
4043# This option is deprecated for removal since 16.0.0.
4044# Its value may be silently ignored in the future.
4045# Reason: Cells v1 is being replaced with Cells v2.
4046#mute_weight_multiplier = -10000.0
4047
4048# DEPRECATED:
4049# Ram weight multiplier.
4050#
4051# Multiplier used for weighing ram. Negative numbers indicate that
4052# Compute should stack VMs on one host instead of spreading out new
4053# VMs to more hosts in the cell.
4054#
4055# Possible values:
4056#
4057# * Numeric multiplier
4058# (floating point value)
4059# This option is deprecated for removal since 16.0.0.
4060# Its value may be silently ignored in the future.
4061# Reason: Cells v1 is being replaced with Cells v2.
4062#ram_weight_multiplier = 10.0
4063
4064# DEPRECATED:
4065# Offset weight multiplier
4066#
4067# Multiplier used to weigh offset weigher. Cells with higher
4068# weight_offsets in the DB will be preferred. The weight_offset
4069# is a property of a cell stored in the database. It can be used
4070# by a deployer to have scheduling decisions favor or disfavor
4071# cells based on the setting.
4072#
4073# Possible values:
4074#
4075# * Numeric multiplier
4076# (floating point value)
4077# This option is deprecated for removal since 16.0.0.
4078# Its value may be silently ignored in the future.
4079# Reason: Cells v1 is being replaced with Cells v2.
4080#offset_weight_multiplier = 1.0
4081
4082# DEPRECATED:
4083# Instance updated at threshold
4084#
4085# Number of seconds after an instance was updated or deleted to
4086# continue to update cells. This option lets the cells manager only
4087# attempt to sync instances that have been updated recently,
4088# i.e., a threshold of 3600 means to only update instances that
4089# have been modified in the last hour.
4090#
4091# Possible values:
4092#
4093# * Threshold in seconds
4094#
4095# Related options:
4096#
4097# * This value is used with the ``instance_update_num_instances``
4098# value in a periodic task run.
4099# (integer value)
4100# This option is deprecated for removal since 16.0.0.
4101# Its value may be silently ignored in the future.
4102# Reason: Cells v1 is being replaced with Cells v2.
4103#instance_updated_at_threshold = 3600
4104
4105# DEPRECATED:
4106# Instance update num instances
4107#
4108# On every run of the periodic task, nova cells manager will attempt
4109# to
4110# sync instance_updated_at_threshold number of instances. When the
4111# manager gets the list of instances, it shuffles them so that
4112# multiple
4113# nova-cells services do not attempt to sync the same instances in
4114# lockstep.
4115#
4116# Possible values:
4117#
4118# * Positive integer number
4119#
4120# Related options:
4121#
4122# * This value is used with the ``instance_updated_at_threshold``
4123# value in a periodic task run.
4124# (integer value)
4125# This option is deprecated for removal since 16.0.0.
4126# Its value may be silently ignored in the future.
4127# Reason: Cells v1 is being replaced with Cells v2.
4128#instance_update_num_instances = 1
4129
4130# DEPRECATED:
4131# Maximum hop count
4132#
4133# When processing a targeted message, if the local cell is not the
4134# target, a route is defined between neighbouring cells. And the
4135# message is processed across the whole routing path. This option
4136# defines the maximum hop counts until reaching the target.
4137#
4138# Possible values:
4139#
4140# * Positive integer value
4141# (integer value)
4142# This option is deprecated for removal since 16.0.0.
4143# Its value may be silently ignored in the future.
4144# Reason: Cells v1 is being replaced with Cells v2.
4145#max_hop_count = 10
4146
4147# DEPRECATED:
4148# Cells scheduler.
4149#
4150# The class of the driver used by the cells scheduler. This should be
4151# the full Python path to the class to be used. If nothing is
4152# specified
4153# in this option, the CellsScheduler is used.
4154# (string value)
4155# This option is deprecated for removal since 16.0.0.
4156# Its value may be silently ignored in the future.
4157# Reason: Cells v1 is being replaced with Cells v2.
4158#scheduler = nova.cells.scheduler.CellsScheduler
4159
4160# DEPRECATED:
4161# RPC driver queue base.
4162#
4163# When sending a message to another cell by JSON-ifying the message
4164# and making an RPC cast to 'process_message', a base queue is used.
4165# This option defines the base queue name to be used when
4166# communicating
4167# between cells. Various topics by message type will be appended to
4168# this.
4169#
4170# Possible values:
4171#
4172# * The base queue name to be used when communicating between cells.
4173# (string value)
4174# This option is deprecated for removal since 16.0.0.
4175# Its value may be silently ignored in the future.
4176# Reason: Cells v1 is being replaced with Cells v2.
4177#rpc_driver_queue_base = cells.intercell
4178
4179# DEPRECATED:
4180# Scheduler filter classes.
4181#
4182# Filter classes the cells scheduler should use. An entry of
4183# "nova.cells.filters.all_filters" maps to all cells filters
4184# included with nova. As of the Mitaka release the following
4185# filter classes are available:
4186#
4187# Different cell filter: A scheduler hint of 'different_cell'
4188# with a value of a full cell name may be specified to route
4189# a build away from a particular cell.
4190#
4191# Image properties filter: Image metadata named
4192# 'hypervisor_version_requires' with a version specification
4193# may be specified to ensure the build goes to a cell which
4194# has hypervisors of the required version. If either the version
4195# requirement on the image or the hypervisor capability of the
4196# cell is not present, this filter returns without filtering out
4197# the cells.
4198#
4199# Target cell filter: A scheduler hint of 'target_cell' with a
4200# value of a full cell name may be specified to route a build to
4201# a particular cell. No error handling is done as there's no way
4202# to know whether the full path is valid.
4203#
4204# As an admin user, you can also add a filter that directs builds
4205# to a particular cell.
4206#
4207# (list value)
4208# This option is deprecated for removal since 16.0.0.
4209# Its value may be silently ignored in the future.
4210# Reason: Cells v1 is being replaced with Cells v2.
4211#scheduler_filter_classes = nova.cells.filters.all_filters
4212
4213# DEPRECATED:
4214# Scheduler weight classes.
4215#
4216# Weigher classes the cells scheduler should use. An entry of
4217# "nova.cells.weights.all_weighers" maps to all cell weighers
4218# included with nova. As of the Mitaka release the following
4219# weight classes are available:
4220#
4221# mute_child: Downgrades the likelihood of child cells being
4222# chosen for scheduling requests, which haven't sent capacity
4223# or capability updates in a while. Options include
4224# mute_weight_multiplier (multiplier for mute children; value
4225# should be negative).
4226#
4227# ram_by_instance_type: Select cells with the most RAM capacity
4228# for the instance type being requested. Because higher weights
4229# win, Compute returns the number of available units for the
4230# instance type requested. The ram_weight_multiplier option defaults
4231# to 10.0, which multiplies the weight by a factor of 10. Use a negative
4232# number to stack VMs on one host instead of spreading out new VMs
4233# to more hosts in the cell.
4234#
4235# weight_offset: Allows modifying the database to weight a particular
4236# cell. The highest weight will be the first cell to be scheduled for
4237# launching an instance. When the weight_offset of a cell is set to 0,
4238# it is unlikely to be picked but it could be picked if other cells
4239# have a lower weight, like if they're full. And when the
4240# weight_offset
4241# is set to a very high value (for example, '999999999999999'), it is
4242# likely to be picked if other cells do not have a higher weight.
4243# (list value)
4244# This option is deprecated for removal since 16.0.0.
4245# Its value may be silently ignored in the future.
4246# Reason: Cells v1 is being replaced with Cells v2.
4247#scheduler_weight_classes = nova.cells.weights.all_weighers
4248
4249# DEPRECATED:
4250# Scheduler retries.
4251#
4252# How many retries when no cells are available. Specifies how many
4253# times the scheduler tries to launch a new instance when no cells
4254# are available.
4255#
4256# Possible values:
4257#
4258# * Positive integer value
4259#
4260# Related options:
4261#
4262# * This value is used with the ``scheduler_retry_delay`` value
4263# while retrying to find a suitable cell.
4264# (integer value)
4265# This option is deprecated for removal since 16.0.0.
4266# Its value may be silently ignored in the future.
4267# Reason: Cells v1 is being replaced with Cells v2.
4268#scheduler_retries = 10
4269
4270# DEPRECATED:
4271# Scheduler retry delay.
4272#
4273# Specifies the delay (in seconds) between scheduling retries when no
4274# cell can be found to place the new instance on. When the instance
4275# could not be scheduled to a cell after ``scheduler_retries`` in
4276# combination with ``scheduler_retry_delay``, then the scheduling
4277# of the instance failed.
4278#
4279# Possible values:
4280#
4281# * Time in seconds.
4282#
4283# Related options:
4284#
4285# * This value is used with the ``scheduler_retries`` value
4286# while retrying to find a suitable cell.
4287# (integer value)
4288# This option is deprecated for removal since 16.0.0.
4289# Its value may be silently ignored in the future.
4290# Reason: Cells v1 is being replaced with Cells v2.
4291#scheduler_retry_delay = 2
4292
4293# DEPRECATED:
4294# DB check interval.
4295#
4296# Cell state manager updates cell status for all cells from the DB
4297# only after this particular interval time is passed. Otherwise cached
4298# status are used. If this value is 0 or negative all cell status are
4299# updated from the DB whenever a state is needed.
4300#
4301# Possible values:
4302#
4303# * Interval time, in seconds.
4304#
4305# (integer value)
4306# This option is deprecated for removal since 16.0.0.
4307# Its value may be silently ignored in the future.
4308# Reason: Cells v1 is being replaced with Cells v2.
4309#db_check_interval = 60
4310
4311# DEPRECATED:
4312# Optional cells configuration.
4313#
4314# Configuration file from which to read cells configuration. If given,
4315# overrides reading cells from the database.
4316#
4317# Cells store all inter-cell communication data, including user names
4318# and passwords, in the database. Because the cells data is not
4319# updated
4320# very frequently, use this option to specify a JSON file to store
4321# cells data. With this configuration, the database is no longer
4322# consulted when reloading the cells data. The file must have columns
4323# present in the Cell model (excluding common database fields and the
4324# id column). You must specify the queue connection information
4325# through
4326# a transport_url field, instead of username, password, and so on.
4327#
4328# The transport_url has the following form:
4329# rabbit://USERNAME:PASSWORD@HOSTNAME:PORT/VIRTUAL_HOST
4330#
4331# Possible values:
4332#
4333# The scheme can be either qpid or rabbit, the following sample shows
4334# this optional configuration:
4335#
4336# {
4337# "parent": {
4338# "name": "parent",
4339# "api_url": "http://api.example.com:8774",
4340# "transport_url": "rabbit://rabbit.example.com",
4341# "weight_offset": 0.0,
4342# "weight_scale": 1.0,
4343# "is_parent": true
4344# },
4345# "cell1": {
4346# "name": "cell1",
4347# "api_url": "http://api.example.com:8774",
4348# "transport_url": "rabbit://rabbit1.example.com",
4349# "weight_offset": 0.0,
4350# "weight_scale": 1.0,
4351# "is_parent": false
4352# },
4353# "cell2": {
4354# "name": "cell2",
4355# "api_url": "http://api.example.com:8774",
4356# "transport_url": "rabbit://rabbit2.example.com",
4357# "weight_offset": 0.0,
4358# "weight_scale": 1.0,
4359# "is_parent": false
4360# }
4361# }
4362#
4363# (string value)
4364# This option is deprecated for removal since 16.0.0.
4365# Its value may be silently ignored in the future.
4366# Reason: Cells v1 is being replaced with Cells v2.
4367#cells_config = <None>
4368
4369
4370[cinder]
4371
4372#
4373# From nova.conf
4374#
4375
4376#
4377# Info to match when looking for cinder in the service catalog.
4378#
4379# Possible values:
4380#
4381# * Format is separated values of the form:
4382# <service_type>:<service_name>:<endpoint_type>
4383#
4384# Note: Nova does not support the Cinder v2 API since the Nova 17.0.0
4385# Queens
4386# release.
4387#
4388# Related options:
4389#
4390# * endpoint_template - Setting this option will override catalog_info
4391# (string value)
4392#catalog_info = volumev3:cinderv3:publicURL
4393catalog_info = volumev3:cinderv3:internalURL
4394
4395#
4396# If this option is set then it will override service catalog lookup
4397# with
4398# this template for cinder endpoint
4399#
4400# Possible values:
4401#
4402# * URL for cinder endpoint API
4403# e.g. http://localhost:8776/v3/%(project_id)s
4404#
4405# Note: Nova does not support the Cinder v2 API since the Nova 17.0.0
4406# Queens
4407# release.
4408#
4409# Related options:
4410#
4411# * catalog_info - If endpoint_template is not set, catalog_info will
4412# be used.
4413# (string value)
4414#endpoint_template = <None>
4415
4416# * Any string representing region name
4417# (string value)
4418#os_region_name = <None>
4419os_region_name = {{ controller.identity.region }}
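# For example, with ``region: RegionOne`` defined under the controller
# identity pillar (illustrative value), the line above renders as
# ``os_region_name = RegionOne``.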
4420
4421#
4422# Number of times cinderclient should retry on any failed http call.
4423# 0 means connection is attempted only once. Setting it to any
4424# positive integer
4425# means that on failure connection is retried that many times e.g.
4426# setting it
4427# to 3 means total attempts to connect will be 4.
4428#
4429# Possible values:
4430#
4431# * Any integer value. 0 means connection is attempted only once
4432# (integer value)
4433# Minimum value: 0
4434#http_retries = 3
4435
4436#
4437# Allow attach between instance and volume in different availability
4438# zones.
4439#
4440# If False, volumes attached to an instance must be in the same
4441# availability
4442# zone in Cinder as the instance availability zone in Nova.
4443# This also means care should be taken when booting an instance from a
4444# volume
4445# where source is not "volume" because Nova will attempt to create a
4446# volume using
4447# the same availability zone as what is assigned to the instance.
4448# If that AZ is not in Cinder (or
4449# allow_availability_zone_fallback=False in
4450# cinder.conf), the volume create request will fail and the instance
4451# will fail
4452# the build request.
4453# By default there is no availability zone restriction on volume
4454# attach.
4455# (boolean value)
4456#cross_az_attach = true
4457{%- if controller.cross_az_attach is defined %}
4458cross_az_attach={{ controller.cross_az_attach }}
4459{%- endif %}
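# For example, setting ``cross_az_attach: false`` on the controller pillar
# (illustrative value) renders the block above roughly as
# ``cross_az_attach=False``.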
4460
4461{%- set _data = controller.get('cinder', controller.get('identity', {})) %}
4462{%- if 'cacert_file' not in _data.keys() %}{% do _data.update({'cacert_file': controller.cacert_file}) %}{% endif %}
4463{%- set auth_type = _data.get('auth_type', 'password') %}
4464{%- include "oslo_templates/files/queens/keystoneauth/_type_" + auth_type + ".conf" %}
4465
4466[conductor]
4467#
4468# Options under this group are used to define Conductor's
4469# communication,
4470# which manager should act as a proxy between computes and
4471# database,
4472# and finally, how many worker processes will be used.
4473
4474#
4475# From nova.conf
4476#
4477
4478# DEPRECATED:
4479# Topic exchange name on which conductor nodes listen.
4480# (string value)
4481# This option is deprecated for removal since 15.0.0.
4482# Its value may be silently ignored in the future.
4483# Reason:
4484# There is no need to let users choose the RPC topic for all services
4485# - there
4486# is little gain from this. Furthermore, it makes it really easy to
4487# break Nova
4488# by using this option.
4489#topic = conductor
4490
4491#
4492# Number of workers for OpenStack Conductor service. The default will
4493# be the
4494# number of CPUs available.
4495# (integer value)
4496
4497workers = {{ controller.get('conductor', {}).get('workers', controller.workers) }}
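# For example, with ``conductor: { workers: 8 }`` in the controller pillar
# (illustrative value), the line above renders as ``workers = 8``; if no
# conductor-specific value is set, the controller-wide ``workers`` value is
# used instead.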
4498
4499
4500[console]
4501#
4502# Options under this group allow to tune the configuration of the
4503# console proxy
4504# service.
4505#
4506# Note: the configuration of every compute node includes a
4507# ``console_host`` option,
4508# which allows selecting the console proxy service to connect to.
4509
4510#
4511# From nova.conf
4512#
4513
4514#
4515# Adds list of allowed origins to the console websocket proxy to allow
4516# connections from other origin hostnames.
4517# Websocket proxy matches the host header with the origin header to
4518# prevent cross-site requests. This list specifies any values, other
4519# than the host, that are allowed in the origin header.
4520#
4521# Possible values:
4522#
4523# * A list where each element is an allowed origin hostname, else an
4524# empty list
4525# (list value)
4526# Deprecated group/name - [DEFAULT]/console_allowed_origins
4527#allowed_origins =
4528
4529
4530[consoleauth]
4531
4532#
4533# From nova.conf
4534#
4535
4536#
4537# The lifetime of a console auth token (in seconds).
4538#
4539# A console auth token is used in authorizing console access for a
4540# user.
4541# Once the auth token time to live count has elapsed, the token is
4542# considered expired. Expired tokens are then deleted.
4543# (integer value)
4544# Minimum value: 0
4545# Deprecated group/name - [DEFAULT]/console_token_ttl
4546#token_ttl = 600
4547{% if controller.consoleauth_token_ttl is defined %}
4548{%- set token_ttl = controller.consoleauth_token_ttl %}
4549token_ttl = {{ token_ttl }}
4550{%- elif controller.get('consoleauth', {}).token_ttl is defined %}
4551token_ttl = {{ controller.consoleauth.token_ttl }}
4552{% endif %}
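# For example, setting ``consoleauth_token_ttl: 1200`` on the controller
# pillar (illustrative value) renders the block above as
# ``token_ttl = 1200``.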
4553
4554[cors]
4555{%- if controller.cors is defined %}
4556{%- set _data = controller.cors %}
4557{%- include "oslo_templates/files/queens/oslo/_cors.conf" %}
4558{%- endif %}
4559
4560[crypto]
4561
4562#
4563# From nova.conf
4564#
4565
4566#
4567# Filename of root CA (Certificate Authority). This is a container
4568# format
4569# and includes root certificates.
4570#
4571# Possible values:
4572#
4573# * Any file name containing root CA, cacert.pem is default
4574#
4575# Related options:
4576#
4577# * ca_path
4578# (string value)
4579#ca_file = cacert.pem
4580
4581#
4582# Filename of a private key.
4583#
4584# Related options:
4585#
4586# * keys_path
4587# (string value)
4588#key_file = private/cakey.pem
4589
4590#
4591# Filename of root Certificate Revocation List (CRL). This is a list
4592# of
4593# certificates that have been revoked, and therefore, entities
4594# presenting
4595# those (revoked) certificates should no longer be trusted.
4596#
4597# Related options:
4598#
4599# * ca_path
4600# (string value)
4601#crl_file = crl.pem
4602
4603#
4604# Directory path where keys are located.
4605#
4606# Related options:
4607#
4608# * key_file
4609# (string value)
4610#keys_path = $state_path/keys
4611
4612#
4613# Directory path where root CA is located.
4614#
4615# Related options:
4616#
4617# * ca_file
4618# (string value)
4619#ca_path = $state_path/CA
4620
4621# Option to enable/disable use of CA for each project. (boolean value)
4622#use_project_ca = false
4623
4624#
4625# Subject for certificate for users, %s for
4626# project, user, timestamp
4627# (string value)
4628#user_cert_subject = /C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s
4629
4630#
4631# Subject for certificate for projects, %s for
4632# project, timestamp
4633# (string value)
4634#project_cert_subject = /C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s
4635
4636
4637[devices]
4638
4639#
4640# From nova.conf
4641#
4642
4643#
4644# A list of the vGPU types enabled in the compute node.
4645#
4646# Some pGPUs (e.g. NVIDIA GRID K1) support different vGPU types. User
4647# can use
4648# this option to specify a list of enabled vGPU types that may be
4649# assigned to a
4650# guest instance. But please note that Nova only supports a single
4651# type in the
4652# Queens release. If more than one vGPU type is specified (as a comma-
4653# separated
4654# list), only the first one will be used. An example is as the
4655# following:
4656# [devices]
4657# enabled_vgpu_types = GRID K100,Intel GVT-g,MxGPU.2,nvidia-11
4658# (list value)
4659#enabled_vgpu_types =
4660
4661
4662[ephemeral_storage_encryption]
4663
4664#
4665# From nova.conf
4666#
4667
4668#
4669# Enables/disables LVM ephemeral storage encryption.
4670# (boolean value)
4671#enabled = false
4672
4673#
4674# Cipher-mode string to be used.
4675#
4676# The cipher and mode to be used to encrypt ephemeral storage. The set
4677# of
4678# cipher-mode combinations available depends on kernel support.
4679# According
4680# to the dm-crypt documentation, the cipher is expected to be in the
4681# format:
4682# "<cipher>-<chainmode>-<ivmode>".
4683#
4684# Possible values:
4685#
4686# * Any crypto option listed in ``/proc/crypto``.
4687# (string value)
4688#cipher = aes-xts-plain64
4689
4690#
4691# Encryption key length in bits.
4692#
4693# The bit length of the encryption key to be used to encrypt ephemeral
4694# storage.
4695# In XTS mode only half of the bits are used for encryption key.
4696# (integer value)
4697# Minimum value: 1
4698#key_size = 512
4699
4700
4701[filter_scheduler]
4702
4703#
4704# From nova.conf
4705#
4706
4707#
4708# Size of subset of best hosts selected by scheduler.
4709#
4710# New instances will be scheduled on a host chosen randomly from a
4711# subset of the
4712# N best hosts, where N is the value set by this option.
4713#
4714# Setting this to a value greater than 1 will reduce the chance that
4715# multiple
4716# scheduler processes handling similar requests will select the same
4717# host,
4718# creating a potential race condition. By selecting a host randomly
4719# from the N
4720# hosts that best fit the request, the chance of a conflict is
4721# reduced. However,
4722# the higher you set this value, the less optimal the chosen host may
4723# be for a
4724# given request.
4725#
4726# This option is only used by the FilterScheduler and its subclasses;
4727# if you use
4728# a different scheduler, this option has no effect.
4729#
4730# Possible values:
4731#
4732# * An integer, where the integer corresponds to the size of a host
4733# subset. Any
4734# integer is valid, although any value less than 1 will be treated
4735# as 1
4736# (integer value)
4737# Minimum value: 1
4738# Deprecated group/name - [DEFAULT]/scheduler_host_subset_size
4739host_subset_size = 30
4740
4741#
4742# The number of instances that can be actively performing IO on a
4743# host.
4744#
4745# Instances performing IO include those in the following states:
4746# build, resize,
4747# snapshot, migrate, rescue, unshelve.
4748#
4749# This option is only used by the FilterScheduler and its subclasses;
4750# if you use
4751# a different scheduler, this option has no effect. Also note that
4752# this setting
4753# only affects scheduling if the 'io_ops_filter' filter is enabled.
4754#
4755# Possible values:
4756#
4757# * An integer, where the integer corresponds to the max number of
4758# instances
4759# that can be actively performing IO on any given host.
4760# (integer value)
4761max_io_ops_per_host = 8
4762
4763#
4764# Maximum number of instances that can be active on a host.
4765#
4766# If you need to limit the number of instances on any given host, set
4767# this option
4768# to the maximum number of instances you want to allow. The
4769# num_instances_filter
4770# will reject any host that has at least as many instances as this
4771# option's
4772# value.
4773#
4774# This option is only used by the FilterScheduler and its subclasses;
4775# if you use
4776# a different scheduler, this option has no effect. Also note that
4777# this setting
4778# only affects scheduling if the 'num_instances_filter' filter is
4779# enabled.
4780#
4781# Possible values:
4782#
4783# * An integer, where the integer corresponds to the max instances
4784# that can be
4785# scheduled on a host.
4786# (integer value)
4787# Minimum value: 1
4788max_instances_per_host = 50
4789
4790#
4791# Enable querying of individual hosts for instance information.
4792#
4793# The scheduler may need information about the instances on a host in
4794# order to
4795# evaluate its filters and weighers. The most common need for this
4796# information is
4797# for the (anti-)affinity filters, which need to choose a host based
4798# on the
4799# instances already running on a host.
4800#
4801# If the configured filters and weighers do not need this information,
4802# disabling
4803# this option will improve performance. It may also be disabled when
4804# the tracking
4805# overhead proves too heavy, although this will cause classes
4806# requiring host
4807# usage data to query the database on each request instead.
4808#
4809# This option is only used by the FilterScheduler and its subclasses;
4810# if you use
4811# a different scheduler, this option has no effect.
4812#
4813# NOTE: In a multi-cell (v2) setup where the cell MQ is separated from
4814# the
4815# top-level, computes cannot directly communicate with the scheduler.
4816# Thus,
4817# this option cannot be enabled in that scenario. See also the
4818# [workarounds]/disable_group_policy_check_upcall option.
4819# (boolean value)
4820# Deprecated group/name - [DEFAULT]/scheduler_tracks_instance_changes
4821#track_instance_changes = true
4822
4823#
4824# Filters that the scheduler can use.
4825#
4826# An unordered list of the filter classes the nova scheduler may
4827# apply. Only the
4828# filters specified in the 'enabled_filters' option will be used, but
4829# any filter appearing in that option must also be included in this
4830# list.
4831#
4832# By default, this is set to all filters that are included with nova.
4833#
4834# This option is only used by the FilterScheduler and its subclasses;
4835# if you use
4836# a different scheduler, this option has no effect.
4837#
4838# Possible values:
4839#
4840# * A list of zero or more strings, where each string corresponds to
4841# the name of
4842# a filter that may be used for selecting a host
4843#
4844# Related options:
4845#
4846# * enabled_filters
4847# (multi valued)
4848# Deprecated group/name - [DEFAULT]/scheduler_available_filters
4849#available_filters = nova.scheduler.filters.all_filters
4850available_filters=nova.scheduler.filters.all_filters
4851available_filters=nova.scheduler.filters.pci_passthrough_filter.PciPassthroughFilter
4852{% for filter in controller.get('scheduler_custom_filters', []) %}
4853scheduler_available_filters = {{ filter }}
4854{% endfor %}
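# As an illustration only (hypothetical filter class path), a single entry
# under ``scheduler_custom_filters`` in the controller pillar would add a
# line such as:
#
#scheduler_available_filters = my_filters.custom_filter.CustomFilter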
4855
4856#
4857# Filters that the scheduler will use.
4858#
4859# An ordered list of filter class names that will be used for
4860# filtering
4861# hosts. These filters will be applied in the order they are listed so
4862# place your most restrictive filters first to make the filtering
4863# process more
4864# efficient.
4865#
4866# This option is only used by the FilterScheduler and its subclasses;
4867# if you use
4868# a different scheduler, this option has no effect.
4869#
4870# Possible values:
4871#
4872# * A list of zero or more strings, where each string corresponds to
4873# the name of
4874# a filter to be used for selecting a host
4875#
4876# Related options:
4877#
4878# * All of the filters in this option *must* be present in the
4879# 'scheduler_available_filters' option, or a
4880# SchedulerHostFilterNotFound
4881# exception will be raised.
4882# (list value)
4883# Deprecated group/name - [DEFAULT]/scheduler_default_filters
4884#enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
4885{%- set _enabled_filters = controller.scheduler_default_filters if controller.scheduler_default_filters is string else controller.scheduler_default_filters|unique|sort|join(',') %}
4886enabled_filters = {{ _enabled_filters }}
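# For example, a pillar list of ``['RetryFilter', 'ComputeFilter',
# 'RetryFilter']`` (illustrative values) is de-duplicated and sorted by the
# expression above, rendering as
# ``enabled_filters = ComputeFilter,RetryFilter``.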
4887
4888# DEPRECATED:
4889# Filters used for filtering baremetal hosts.
4890#
4891# Filters are applied in order, so place your most restrictive filters
4892# first to
4893# make the filtering process more efficient.
4894#
4895# This option is only used by the FilterScheduler and its subclasses;
4896# if you use
4897# a different scheduler, this option has no effect.
4898#
4899# Possible values:
4900#
4901# * A list of zero or more strings, where each string corresponds to
4902# the name of
4903# a filter to be used for selecting a baremetal host
4904#
4905# Related options:
4906#
4907# * If the 'scheduler_use_baremetal_filters' option is False, this
4908# option has
4909# no effect.
4910# (list value)
4911# Deprecated group/name - [DEFAULT]/baremetal_scheduler_default_filters
4912# This option is deprecated for removal.
4913# Its value may be silently ignored in the future.
4914# Reason:
4915# These filters were used to overcome some of the baremetal scheduling
4916# limitations in Nova prior to the use of the Placement API. Now
4917# scheduling will
4918# use the custom resource class defined for each baremetal node to
4919# make its
4920# selection.
4921#baremetal_enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ExactRamFilter,ExactDiskFilter,ExactCoreFilter
4922
4923# DEPRECATED:
4924# Enable baremetal filters.
4925#
4926# Set this to True to tell the nova scheduler that it should use the
4927# filters
4928# specified in the 'baremetal_enabled_filters' option. If you are not
4929# scheduling baremetal nodes, leave this at the default setting of
4930# False.
4931#
4932# This option is only used by the FilterScheduler and its subclasses;
4933# if you use
4934# a different scheduler, this option has no effect.
4935#
4936# Related options:
4937#
4938# * If this option is set to True, then the filters specified in the
4939# 'baremetal_enabled_filters' are used instead of the filters
4940# specified in 'enabled_filters'.
4941# (boolean value)
4942# Deprecated group/name - [DEFAULT]/scheduler_use_baremetal_filters
4943# This option is deprecated for removal.
4944# Its value may be silently ignored in the future.
4945# Reason:
4946# These filters were used to overcome some of the baremetal scheduling
4947# limitations in Nova prior to the use of the Placement API. Now
4948# scheduling will
4949# use the custom resource class defined for each baremetal node to
4950# make its
4951# selection.
4952#use_baremetal_filters = false
4953
4954#
4955# Weighers that the scheduler will use.
4956#
4957# Only hosts which pass the filters are weighed. The weight for any
4958# host starts
4959# at 0, and the weighers order these hosts by adding to or subtracting
4960# from the
4961# weight assigned by the previous weigher. Weights may become
4962# negative. An
4963# instance will be scheduled to one of the N most-weighted hosts,
4964# where N is
4965# 'scheduler_host_subset_size'.
4966#
4967# By default, this is set to all weighers that are included with Nova.
4968#
4969# This option is only used by the FilterScheduler and its subclasses;
4970# if you use
4971# a different scheduler, this option has no effect.
4972#
4973# Possible values:
4974#
4975# * A list of zero or more strings, where each string corresponds to
4976# the name of
4977# a weigher that will be used for selecting a host
4978# (list value)
4979# Deprecated group/name - [DEFAULT]/scheduler_weight_classes
4980#weight_classes = nova.scheduler.weights.all_weighers
4981
4982#
4983# RAM weight multiplier ratio.
4984#
4985# This option determines how hosts with more or less available RAM are
4986# weighed. A
4987# positive value will result in the scheduler preferring hosts with
4988# more
4989# available RAM, and a negative number will result in the scheduler
4990# preferring
4991# hosts with less available RAM. Another way to look at it is that
4992# positive
4993# values for this option will tend to spread instances across many
4994# hosts, while
4995# negative values will tend to fill up (stack) hosts as much as
4996# possible before
4997# scheduling to a less-used host. The absolute value, whether positive
4998# or
4999# negative, controls how strong the RAM weigher is relative to other
5000# weighers.
5001#
5002# This option is only used by the FilterScheduler and its subclasses;
5003# if you use
5004# a different scheduler, this option has no effect. Also note that
5005# this setting
5006# only affects scheduling if the 'ram' weigher is enabled.
5007#
5008# Possible values:
5009#
5010# * An integer or float value, where the value corresponds to the
5011# multiplier
5012# ratio for this weigher.
5013# (floating point value)
5014#ram_weight_multiplier = 1.0
5015
5016#
5017# Disk weight multiplier ratio.
5018#
5019# Multiplier used for weighing free disk space. Negative numbers mean
5020# to
5021# stack vs spread.
5022#
5023# This option is only used by the FilterScheduler and its subclasses;
5024# if you use
5025# a different scheduler, this option has no effect. Also note that
5026# this setting
5027# only affects scheduling if the 'disk' weigher is enabled.
5028#
5029# Possible values:
5030#
5031# * An integer or float value, where the value corresponds to the
5032# multiplier
5033# ratio for this weigher.
5034# (floating point value)
5035#disk_weight_multiplier = 1.0
5036
5037#
5038# IO operations weight multiplier ratio.
5039#
5040# This option determines how hosts with differing workloads are
5041# weighed. Negative
5042# values, such as the default, will result in the scheduler preferring
5043# hosts with
5044# lighter workloads whereas positive values will prefer hosts with
5045# heavier
5046# workloads. Another way to look at it is that positive values for
5047# this option
5048# will tend to schedule instances onto hosts that are already busy,
5049# while
5050# negative values will tend to distribute the workload across more
5051# hosts. The
5052# absolute value, whether positive or negative, controls how strong
5053# the io_ops
5054# weigher is relative to other weighers.
5055#
5056# This option is only used by the FilterScheduler and its subclasses;
5057# if you use
5058# a different scheduler, this option has no effect. Also note that
5059# this setting
5060# only affects scheduling if the 'io_ops' weigher is enabled.
5061#
5062# Possible values:
5063#
5064# * An integer or float value, where the value corresponds to the
5065# multiplier
5066# ratio for this weigher.
5067# (floating point value)
5068#io_ops_weight_multiplier = -1.0
5069
5070#
5071# PCI device affinity weight multiplier.
5072#
5073# The PCI device affinity weighter computes a weighting based on the
5074# number of
5075# PCI devices on the host and the number of PCI devices requested by
5076# the
5077# instance. The ``NUMATopologyFilter`` filter must be enabled for this
5078# to have
5079# any significance. For more information, refer to the filter
5080# documentation:
5081#
5082# https://docs.openstack.org/nova/latest/user/filter-
5083# scheduler.html
5084#
5085# Possible values:
5086#
5087# * A positive integer or float value, where the value corresponds to
5088# the
5089# multiplier ratio for this weigher.
5090# (floating point value)
5091# Minimum value: 0
5092#pci_weight_multiplier = 1.0
5093
5094#
5095# Multiplier used for weighing hosts for group soft-affinity.
5096#
5097# Possible values:
5098#
5099# * An integer or float value, where the value corresponds to weight
5100# multiplier
5101# for hosts with group soft affinity. Only positive values are
5102# meaningful, as
5103# negative values would make this behave as a soft anti-affinity
5104# weigher.
5105# (floating point value)
5106#soft_affinity_weight_multiplier = 1.0
5107
5108#
5109# Multiplier used for weighing hosts for group soft-anti-affinity.
5110#
5111# Possible values:
5112#
5113# * An integer or float value, where the value corresponds to weight
5114# multiplier
5115# for hosts with group soft anti-affinity. Only positive values are
5116# meaningful, as negative values would make this behave as a soft
5117# affinity
5118# weigher.
5119# (floating point value)
5120#soft_anti_affinity_weight_multiplier = 1.0
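#
# Illustrative example (editor's addition, commented out; values are
# assumptions). To make server-group soft-affinity and soft-anti-affinity
# hints weigh more heavily than the other enabled weighers, one might set:
#
# soft_affinity_weight_multiplier = 10.0
# soft_anti_affinity_weight_multiplier = 10.0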
5121
5122#
5123# Enable spreading the instances between hosts with the same best
5124# weight.
5125#
5126# Enabling it is beneficial for cases when host_subset_size is 1
5127# (default), but there is a large number of hosts with the same maximal
5128# weight.
5129# This scenario is common in Ironic deployments where there are
5130# typically many
5131# baremetal nodes with identical weights returned to the scheduler.
5132# In such a case, enabling this option will reduce contention and chances
5133# for
5134# rescheduling events.
5135# At the same time it will make the instance packing (even in the
5136# unweighted case)
5137# less dense.
5138# (boolean value)
5139#shuffle_best_same_weighed_hosts = false
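#
# Illustrative example (editor's addition, commented out). In an
# Ironic-heavy deployment where many bare-metal nodes report identical
# weights, shuffling the equally weighted hosts reduces scheduler
# contention and rescheduling, e.g.:
#
# shuffle_best_same_weighed_hosts = true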
5140
5141#
5142# The default architecture to be used when using the image properties
5143# filter.
5144#
5145# When using the ImagePropertiesFilter, it is possible that you want
5146# to define
5147# a default architecture to make the user experience easier and avoid
5148# having
5149# something like x86_64 images landing on aarch64 compute nodes
5150# because the
5151# user did not specify the 'hw_architecture' property in Glance.
5152#
5153# Possible values:
5154#
5155# * CPU Architectures such as x86_64, aarch64, s390x.
5156# (string value)
5157# Possible values:
5158# alpha - <No description provided>
5159# armv6 - <No description provided>
5160# armv7l - <No description provided>
5161# armv7b - <No description provided>
5162# aarch64 - <No description provided>
5163# cris - <No description provided>
5164# i686 - <No description provided>
5165# ia64 - <No description provided>
5166# lm32 - <No description provided>
5167# m68k - <No description provided>
5168# microblaze - <No description provided>
5169# microblazeel - <No description provided>
5170# mips - <No description provided>
5171# mipsel - <No description provided>
5172# mips64 - <No description provided>
5173# mips64el - <No description provided>
5174# openrisc - <No description provided>
5175# parisc - <No description provided>
5176# parisc64 - <No description provided>
5177# ppc - <No description provided>
5178# ppcle - <No description provided>
5179# ppc64 - <No description provided>
5180# ppc64le - <No description provided>
5181# ppcemb - <No description provided>
5182# s390 - <No description provided>
5183# s390x - <No description provided>
5184# sh4 - <No description provided>
5185# sh4eb - <No description provided>
5186# sparc - <No description provided>
5187# sparc64 - <No description provided>
5188# unicore32 - <No description provided>
5189# x86_64 - <No description provided>
5190# xtensa - <No description provided>
5191# xtensaeb - <No description provided>
5192#image_properties_default_architecture = <None>
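#
# Illustrative example (editor's addition, commented out; the value is an
# assumption). On a cloud whose compute nodes are all x86_64, images that
# lack an explicit 'hw_architecture' property could default to:
#
# image_properties_default_architecture = x86_64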
5193
5194#
5195# List of UUIDs for images that can only be run on certain hosts.
5196#
5197# If there is a need to restrict some images to only run on certain
5198# designated
5199# hosts, list those image UUIDs here.
5200#
5201# This option is only used by the FilterScheduler and its subclasses;
5202# if you use
5203# a different scheduler, this option has no effect. Also note that
5204# this setting
5205# only affects scheduling if the 'IsolatedHostsFilter' filter is
5206# enabled.
5207#
5208# Possible values:
5209#
5210# * A list of UUID strings, where each string corresponds to the UUID
5211# of an
5212# image
5213#
5214# Related options:
5215#
5216# * scheduler/isolated_hosts
5217# * scheduler/restrict_isolated_hosts_to_isolated_images
5218# (list value)
5219#isolated_images =
5220
5221#
5222# List of hosts that can only run certain images.
5223#
5224# If there is a need to restrict some images to only run on certain
5225# designated
5226# hosts, list those host names here.
5227#
5228# This option is only used by the FilterScheduler and its subclasses;
5229# if you use
5230# a different scheduler, this option has no effect. Also note that
5231# this setting
5232# only affects scheduling if the 'IsolatedHostsFilter' filter is
5233# enabled.
5234#
5235# Possible values:
5236#
5237# * A list of strings, where each string corresponds to the name of a
5238# host
5239#
5240# Related options:
5241#
5242# * scheduler/isolated_images
5243# * scheduler/restrict_isolated_hosts_to_isolated_images
5244# (list value)
5245#isolated_hosts =
5246
5247#
5248# Prevent non-isolated images from being built on isolated hosts.
5249#
5250# This option is only used by the FilterScheduler and its subclasses;
5251# if you use
5252# a different scheduler, this option has no effect. Also note that
5253# this setting
5254# only affects scheduling if the 'IsolatedHostsFilter' filter is
5255# enabled. Even
5256# then, this option doesn't affect the behavior of requests for
5257# isolated images,
5258# which will *always* be restricted to isolated hosts.
5259#
5260# Related options:
5261#
5262# * scheduler/isolated_images
5263# * scheduler/isolated_hosts
5264# (boolean value)
5265#restrict_isolated_hosts_to_isolated_images = true
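#
# Illustrative example (editor's addition, commented out; the UUID and
# host names are made up). Pinning a licensed image to two dedicated
# hosts with the IsolatedHostsFilter might look like:
#
# isolated_images = 11111111-2222-3333-4444-555555555555
# isolated_hosts = compute-lic-01,compute-lic-02
# restrict_isolated_hosts_to_isolated_images = true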
5266
5267#
5268# Image property namespace for use in the host aggregate.
5269#
5270# Images and hosts can be configured so that certain images can only
5271# be scheduled
5272# to hosts in a particular aggregate. This is done with metadata
5273# values set on
5274# the host aggregate that are identified by beginning with the value
5275# of this
5276# option. If the host is part of an aggregate with such a metadata
5277# key, the image
5278# in the request spec must have the value of that metadata in its
5279# properties in
5280# order for the scheduler to consider the host as acceptable.
5281#
5282# This option is only used by the FilterScheduler and its subclasses;
5283# if you use
5284# a different scheduler, this option has no effect. Also note that
5285# this setting
5286# only affects scheduling if the
5287# 'aggregate_image_properties_isolation' filter is
5288# enabled.
5289#
5290# Possible values:
5291#
5292# * A string, where the string corresponds to an image property
5293# namespace
5294#
5295# Related options:
5296#
5297# * aggregate_image_properties_isolation_separator
5298# (string value)
5299#aggregate_image_properties_isolation_namespace = <None>
5300
5301#
5302# Separator character(s) for image property namespace and name.
5303#
5304# When using the aggregate_image_properties_isolation filter, the
5305# relevant
5306# metadata keys are prefixed with the namespace defined in the
5307# aggregate_image_properties_isolation_namespace configuration option
5308# plus a
5309# separator. This option defines the separator to be used.
5310#
5311# This option is only used by the FilterScheduler and its subclasses;
5312# if you use
5313# a different scheduler, this option has no effect. Also note that
5314# this setting
5315# only affects scheduling if the
5316# 'aggregate_image_properties_isolation' filter
5317# is enabled.
5318#
5319# Possible values:
5320#
5321# * A string, where the string corresponds to an image property
5322# namespace
5323# separator character
5324#
5325# Related options:
5326#
5327# * aggregate_image_properties_isolation_namespace
5328# (string value)
5329#aggregate_image_properties_isolation_separator = .
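#
# Illustrative example (editor's addition, commented out; the namespace is
# an assumption). With the aggregate_image_properties_isolation filter
# enabled, a host aggregate metadata key such as 'isolate.os_distro' would
# then be matched against the image property 'os_distro' when these are set:
#
# aggregate_image_properties_isolation_namespace = isolate
# aggregate_image_properties_isolation_separator = .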
5330
5331
5332[glance]
5333# Configuration options for the Image service
5334
5335#
5336# From nova.conf
5337#
5338
5339#
5340# List of glance API server endpoints available to nova.
5341#
5342# https is used for ssl-based glance api servers.
5343#
5344# NOTE: The preferred mechanism for endpoint discovery is via
5345# keystoneauth1
5346# loading options. Only use api_servers if you need multiple endpoints
5347# and are
5348# unable to use a load balancer for some reason.
5349#
5350# Possible values:
5351#
5352# * A list of any fully qualified url of the form
5353# "scheme://hostname:port[/path]"
5354# (i.e. "http://10.0.1.0:9292" or "https://my.glance.server/image").
5355# (list value)
5356#api_servers = <None>
5357api_servers = {{ controller.glance.get('protocol', 'http') }}://{{ controller.glance.host }}:{{ controller.glance.get('port', 9292) }}
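#
# Illustrative example (editor's addition, commented out; hostnames are
# made up). If several glance endpoints must be listed instead of going
# through a load balancer, the option accepts a comma-separated list:
#
# api_servers = http://glance01:9292,http://glance02:9292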
5358
5359#
5360# Enable glance operation retries.
5361#
5362# Specifies the number of retries when uploading / downloading
5363# an image to / from glance. 0 means no retries.
5364# (integer value)
5365# Minimum value: 0
5366#num_retries = 0
5367
5368# DEPRECATED:
5369# List of url schemes that can be directly accessed.
5370#
5371# This option specifies a list of url schemes that can be downloaded
5372# directly via the direct_url. This direct_URL can be fetched from
5373# Image metadata which can be used by nova to get the
5374# image more efficiently. nova-compute could benefit from this by
5375# invoking a copy when it has access to the same file system as
5376# glance.
5377#
5378# Possible values:
5379#
5380# * [file], Empty list (default)
5381# (list value)
5382# This option is deprecated for removal since 17.0.0.
5383# Its value may be silently ignored in the future.
5384# Reason:
5385# This was originally added for the 'nova.image.download.file'
5386# FileTransfer
5387# extension which was removed in the 16.0.0 Pike release. The
5388# 'nova.image.download.modules' extension point is not maintained
5389# and there is no indication of its use in production clouds.
5390#allowed_direct_url_schemes =
5391
5392#
5393# Enable image signature verification.
5394#
5395# nova uses the image signature metadata from glance and verifies the
5396# signature
5397# of a signed image while downloading that image. If the image
5398# signature cannot
5399# be verified or if the image signature metadata is either incomplete
5400# or
5401# unavailable, then nova will not boot the image and instead will
5402# place the
5403# instance into an error state. This provides end users with stronger
5404# assurances
5405# of the integrity of the image data they are using to create servers.
5406#
5407# Related options:
5408#
5409# * The options in the `key_manager` group, as the key_manager is used
5410# for the signature validation.
5411# * Both enable_certificate_validation and
5412# default_trusted_certificate_ids
5413# below depend on this option being enabled.
5414# (boolean value)
5415{%- if controller.get('barbican', {}).get('enabled', False) %}
5416verify_glance_signatures=true
5417{%- else %}
5418#verify_glance_signatures=false
5419{%- endif %}
5420
5421# DEPRECATED:
5422# Enable certificate validation for image signature verification.
5423#
5424# During image signature verification nova will first verify the
5425# validity of the
5426# image's signing certificate using the set of trusted certificates
5427# associated
5428# with the instance. If certificate validation fails, signature
5429# verification
5430# will not be performed and the image will be placed into an error
5431# state. This
5432# provides end users with stronger assurances that the image data is
5433# unmodified
5434# and trustworthy. If left disabled, image signature verification can
5435# still
5436# occur but the end user will not have any assurance that the signing
5437# certificate used to generate the image signature is still
5438# trustworthy.
5439#
5440# Related options:
5441#
5442# * This option only takes effect if verify_glance_signatures is
5443# enabled.
5444# * The value of default_trusted_certificate_ids may be used when this
5445# option
5446# is enabled.
5447# (boolean value)
5448# This option is deprecated for removal since 16.0.0.
5449# Its value may be silently ignored in the future.
5450# Reason:
5451# This option is intended to ease the transition for deployments
5452# leveraging
5453# image signature verification. The intended state long-term is for
5454# signature
5455# verification and certificate validation to always happen together.
5456#enable_certificate_validation = false
5457
5458#
5459# List of certificate IDs for certificates that should be trusted.
5460#
5461# May be used as a default list of trusted certificate IDs for
5462# certificate
5463# validation. The value of this option will be ignored if the user
5464# provides a
5465# list of trusted certificate IDs with an instance API request. The
5466# value of
5467# this option will be persisted with the instance data if signature
5468# verification
5469# and certificate validation are enabled and if the user did not
5470# provide an
5471# alternative list. If left empty when certificate validation is
5472# enabled, the
5473# user must provide a list of trusted certificate IDs, otherwise
5474# certificate
5475# validation will fail.
5476#
5477# Related options:
5478#
5479# * The value of this option may be used if both
5480# verify_glance_signatures and
5481# enable_certificate_validation are enabled.
5482# (list value)
5483#default_trusted_certificate_ids =
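#
# Illustrative example (editor's addition, commented out; the certificate
# IDs are made up). When signature verification and certificate validation
# are both enabled, a default trust list can be provided as:
#
# default_trusted_certificate_ids = 6a9c8c50-0a49-4e28-88b7-96e1e660a001,8f4d2b10-35c6-4d61-9f0a-2c7a1b3f5e22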
5484
5485# Enable or disable debug logging with glanceclient. (boolean value)
5486#debug = false
5487
5488# PEM encoded Certificate Authority to use when verifying HTTPs
5489# connections. (string value)
5490#cafile = <None>
5491
5492# PEM encoded client certificate cert file (string value)
5493#certfile = <None>
5494
5495# PEM encoded client certificate key file (string value)
5496#keyfile = <None>
5497
5498# Verify HTTPS connections. (boolean value)
5499#insecure = false
5500
5501# Timeout value for http requests (integer value)
5502#timeout = <None>
5503
5504# The default service_type for endpoint URL discovery. (string value)
5505#service_type = image
5506
5507# The default service_name for endpoint URL discovery. (string value)
5508#service_name = <None>
5509
5510# List of interfaces, in order of preference, for endpoint URL. (list
5511# value)
5512#valid_interfaces = internal,public
5513
5514# The default region_name for endpoint URL discovery. (string value)
5515#region_name = <None>
5516
5517# Always use this endpoint URL for requests for this client. NOTE: The
5518# unversioned endpoint should be specified here; to request a
5519# particular API version, use the `version`, `min-version`, and/or
5520# `max-version` options. (string value)
5521#endpoint_override = <None>
5522
5523
5524[guestfs]
5525#
5526# libguestfs is a set of tools for accessing and modifying virtual
5527# machine (VM) disk images. You can use this for viewing and editing
5528# files inside guests, scripting changes to VMs, monitoring disk
5529# used/free statistics, creating guests, P2V, V2V, performing backups,
5530# cloning VMs, building VMs, formatting disks and resizing disks.
5531
5532#
5533# From nova.conf
5534#
5535
5536#
5537# Enables/disables guestfs logging.
5538#
5539# This configures guestfs to emit debug messages and push them to the
5540# OpenStack logging system. When set to True, it traces libguestfs API
5541# calls and enables verbose debug messages. In order to use this
5542# feature, the "libguestfs" package must be installed.
5543#
5544# Related options:
5545# Since libguestfs accesses and modifies VMs managed by libvirt, the
5546# options below
5547# should be set to give access to those VMs.
5548# * libvirt.inject_key
5549# * libvirt.inject_partition
5550# * libvirt.inject_password
5551# (boolean value)
5552#debug = false
5553
5554
5555[hyperv]
5556#
5557# The hyperv feature allows you to configure the Hyper-V hypervisor
5558# driver to be used within an OpenStack deployment.
5559
5560#
5561# From nova.conf
5562#
5563
5564#
5565# Dynamic memory ratio
5566#
5567# Enables dynamic memory allocation (ballooning) when set to a value
5568# greater than 1. The value expresses the ratio between the total RAM
5569# assigned to an instance and its startup RAM amount. For example a
5570# ratio of 2.0 for an instance with 1024MB of RAM implies 512MB of
5571# RAM allocated at startup.
5572#
5573# Possible values:
5574#
5575# * 1.0: Disables dynamic memory allocation (Default).
5576# * Float values greater than 1.0: Enables allocation of total implied
5577# RAM divided by this value for startup.
5578# (floating point value)
5579#dynamic_memory_ratio = 1.0
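#
# Illustrative example (editor's addition, commented out; the value is an
# assumption). A ratio of 2.0 lets a 2048MB instance start with 1024MB and
# balloon up on demand:
#
# dynamic_memory_ratio = 2.0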
5580
5581#
5582# Enable instance metrics collection
5583#
5584# Enables metrics collections for an instance by using Hyper-V's
5585# metric APIs. Collected data can be retrieved by other apps and
5586# services, e.g.: Ceilometer.
5587# (boolean value)
5588#enable_instance_metrics_collection = false
5589
5590#
5591# Instances path share
5592#
5593# The name of a Windows share mapped to the "instances_path" dir
5594# and used by the resize feature to copy files to the target host.
5595# If left blank, an administrative share (hidden network share) will
5596# be used, looking for the same "instances_path" used locally.
5597#
5598# Possible values:
5599#
5600# * "": An administrative share will be used (Default).
5601# * Name of a Windows share.
5602#
5603# Related options:
5604#
5605# * "instances_path": The directory which will be used if this option
5606# here is left blank.
5607# (string value)
5608#instances_path_share =
5609
5610#
5611# Limit CPU features
5612#
5613# This flag is needed to support live migration to hosts with
5614# different CPU features and is checked during instance creation
5615# in order to limit the CPU features used by the instance.
5616# (boolean value)
5617#limit_cpu_features = false
5618
5619#
5620# Mounted disk query retry count
5621#
5622# The number of times to retry checking for a mounted disk.
5623# The query runs until the device can be found or the retry
5624# count is reached.
5625#
5626# Possible values:
5627#
5628# * Positive integer values. Values greater than 1 are recommended
5629# (Default: 10).
5630#
5631# Related options:
5632#
5633# * Time interval between disk mount retries is declared with
5634# "mounted_disk_query_retry_interval" option.
5635# (integer value)
5636# Minimum value: 0
5637#mounted_disk_query_retry_count = 10
5638
5639#
5640# Mounted disk query retry interval
5641#
5642# Interval between checks for a mounted disk, in seconds.
5643#
5644# Possible values:
5645#
5646# * Time in seconds (Default: 5).
5647#
5648# Related options:
5649#
5650# * This option is meaningful when the mounted_disk_query_retry_count
5651# is greater than 1.
5652# * The retry loop runs with mounted_disk_query_retry_count and
5653# mounted_disk_query_retry_interval configuration options.
5654# (integer value)
5655# Minimum value: 0
5656#mounted_disk_query_retry_interval = 5
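#
# Illustrative example (editor's addition, commented out; values are
# assumptions). The two options together bound the wait for a mounted
# disk, here 20 attempts * 3 seconds = at most 60 seconds:
#
# mounted_disk_query_retry_count = 20
# mounted_disk_query_retry_interval = 3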
5657
5658#
5659# Power state check timeframe
5660#
5661# The timeframe to be checked for instance power state changes.
5662# This option is used to fetch the state of the instance from Hyper-V
5663# through the WMI interface, within the specified timeframe.
5664#
5665# Possible values:
5666#
5667# * Timeframe in seconds (Default: 60).
5668# (integer value)
5669# Minimum value: 0
5670#power_state_check_timeframe = 60
5671
5672#
5673# Power state event polling interval
5674#
5675# Instance power state change event polling frequency. Sets the
5676# listener interval for power state events to the given value.
5677# This option enhances the internal lifecycle notifications of
5678# instances that reboot themselves. It is unlikely that an operator
5679# has to change this value.
5680#
5681# Possible values:
5682#
5683# * Time in seconds (Default: 2).
5684# (integer value)
5685# Minimum value: 0
5686#power_state_event_polling_interval = 2
5687
5688#
5689# qemu-img command
5690#
5691# qemu-img is required for some of the image related operations
5692# like converting between different image types. You can get it
5693# from here: (http://qemu.weilnetz.de/) or you can install the
5694# Cloudbase OpenStack Hyper-V Compute Driver
5695# (https://cloudbase.it/openstack-hyperv-driver/) which automatically
5696# sets the proper path for this config option. You can either give the
5697# full path of qemu-img.exe or set its path in the PATH environment
5698# variable and leave this option to the default value.
5699#
5700# Possible values:
5701#
5702# * Name of the qemu-img executable, in case it is in the same
5703# directory as the nova-compute service or its path is in the
5704# PATH environment variable (Default).
5705# * Path of qemu-img command (DRIVELETTER:\PATH\TO\QEMU-IMG\COMMAND).
5706#
5707# Related options:
5708#
5709# * If the config_drive_cdrom option is False, qemu-img will be used
5710# to
5711# convert the ISO to a VHD, otherwise the configuration drive will
5712# remain an ISO. To use configuration drive with Hyper-V, you must
5713# set the mkisofs_cmd value to the full path to an mkisofs.exe
5714# installation.
5715# (string value)
5716#qemu_img_cmd = qemu-img.exe
5717
5718#
5719# External virtual switch name
5720#
5721# The Hyper-V Virtual Switch is a software-based layer-2 Ethernet
5722# network switch that is available with the installation of the
5723# Hyper-V server role. The switch includes programmatically managed
5724# and extensible capabilities to connect virtual machines to both
5725# virtual networks and the physical network. In addition, Hyper-V
5726# Virtual Switch provides policy enforcement for security, isolation,
5727# and service levels. The vSwitch represented by this config option
5728# must be an external one (not internal or private).
5729#
5730# Possible values:
5731#
5732# * If not provided, the first of a list of available vswitches
5733# is used. This list is queried using WQL.
5734# * Virtual switch name.
5735# (string value)
5736#vswitch_name = <None>
5737
5738#
5739# Wait soft reboot seconds
5740#
5741# Number of seconds to wait for instance to shut down after soft
5742# reboot request is made. We fall back to hard reboot if instance
5743# does not shutdown within this window.
5744#
5745# Possible values:
5746#
5747# * Time in seconds (Default: 60).
5748# (integer value)
5749# Minimum value: 0
5750#wait_soft_reboot_seconds = 60
5751
5752#
5753# Configuration drive cdrom
5754#
5755# OpenStack can be configured to write instance metadata to
5756# a configuration drive, which is then attached to the
5757# instance before it boots. The configuration drive can be
5758# attached as a disk drive (default) or as a CD drive.
5759#
5760# Possible values:
5761#
5762# * True: Attach the configuration drive image as a CD drive.
5763# * False: Attach the configuration drive image as a disk drive
5764# (Default).
5765#
5766# Related options:
5767#
5768# * This option is meaningful with force_config_drive option set to
5769# 'True'
5770# or when the REST API call to create an instance will have
5771# '--config-drive=True' flag.
5772# * config_drive_format option must be set to 'iso9660' in order to
5773# use
5774# CD drive as the configuration drive image.
5775# * To use configuration drive with Hyper-V, you must set the
5776# mkisofs_cmd value to the full path to an mkisofs.exe installation.
5777# Additionally, you must set the qemu_img_cmd value to the full path
5778# to a qemu-img command installation.
5779# * You can configure the Compute service to always create a
5780# configuration
5781# drive by setting the force_config_drive option to 'True'.
5782# (boolean value)
5783#config_drive_cdrom = false
5784
5785#
5786# Configuration drive inject password
5787#
5788# Enables setting the admin password in the configuration drive image.
5789#
5790# Related options:
5791#
5792# * This option is meaningful when used with other options that enable
5793# configuration drive usage with Hyper-V, such as
5794# force_config_drive.
5795# * Currently, the only accepted config_drive_format is 'iso9660'.
5796# (boolean value)
5797#config_drive_inject_password = false
5798
5799#
5800# Volume attach retry count
5801#
5802# The number of times to retry attaching a volume. Volume attachment
5803# is retried until success or the given retry count is reached.
5804#
5805# Possible values:
5806#
5807# * Positive integer values (Default: 10).
5808#
5809# Related options:
5810#
5811# * Time interval between attachment attempts is declared with
5812# volume_attach_retry_interval option.
5813# (integer value)
5814# Minimum value: 0
5815#volume_attach_retry_count = 10
5816
5817#
5818# Volume attach retry interval
5819#
5820# Interval between volume attachment attempts, in seconds.
5821#
5822# Possible values:
5823#
5824# * Time in seconds (Default: 5).
5825#
5826# Related options:
5827#
5828# * This option is meaningful when volume_attach_retry_count
5829# is greater than 1.
5830# * The retry loop runs with volume_attach_retry_count and
5831# volume_attach_retry_interval configuration options.
5832# (integer value)
5833# Minimum value: 0
5834#volume_attach_retry_interval = 5
5835
5836#
5837# Enable RemoteFX feature
5838#
5839# This requires at least one DirectX 11 capable graphics adapter for
5840# Windows / Hyper-V Server 2012 R2 or newer, and the RDS-Virtualization
5841# feature has to be enabled.
5842#
5843# Instances with RemoteFX can be requested with the following flavor
5844# extra specs:
5845#
5846# **os:resolution**. Guest VM screen resolution size. Acceptable
5847# values::
5848#
5849# 1024x768, 1280x1024, 1600x1200, 1920x1200, 2560x1600, 3840x2160
5850#
5851# ``3840x2160`` is only available on Windows / Hyper-V Server 2016.
5852#
5853# **os:monitors**. Guest VM number of monitors. Acceptable values::
5854#
5855# [1, 4] - Windows / Hyper-V Server 2012 R2
5856# [1, 8] - Windows / Hyper-V Server 2016
5857#
5858# **os:vram**. Guest VM VRAM amount. Only available on
5859# Windows / Hyper-V Server 2016. Acceptable values::
5860#
5861# 64, 128, 256, 512, 1024
5862# (boolean value)
5863#enable_remotefx = false
5864
5865#
5866# Use multipath connections when attaching iSCSI or FC disks.
5867#
5868# This requires the Multipath IO Windows feature to be enabled. MPIO
5869# must be
5870# configured to claim such devices.
5871# (boolean value)
5872#use_multipath_io = false
5873
5874#
5875# List of iSCSI initiators that will be used for establishing iSCSI
5876# sessions.
5877#
5878# If none are specified, the Microsoft iSCSI initiator service will
5879# choose the
5880# initiator.
5881# (list value)
5882#iscsi_initiator_list =
5883
5884{% if controller.ironic is defined -%}
5885[ironic]
5886#
5887# Configuration options for Ironic driver (Bare Metal).
5888# If using the Ironic driver, the following options must be set:
5889# * auth_type
5890# * auth_url
5891# * project_name
5892# * username
5893# * password
5894# * project_domain_id or project_domain_name
5895# * user_domain_id or user_domain_name
5896
5897#
5898# From nova.conf
5899#
5900
5901# DEPRECATED: URL override for the Ironic API endpoint. (uri value)
5902# This option is deprecated for removal.
5903# Its value may be silently ignored in the future.
5904# Reason: Endpoint lookup uses the service catalog via common
5905# keystoneauth1 Adapter configuration options. In the current release,
5906# api_endpoint will override this behavior, but will be ignored and/or
5907# removed in a future release. To achieve the same result, use the
5908# endpoint_override option instead.
5909#api_endpoint = http://ironic.example.org:6385/
5910api_endpoint={{ controller.ironic.get('protocol', 'http') }}://{{ controller.ironic.host }}:{{ controller.ironic.port }}
5911
5912#
5913# The number of times to retry when a request conflicts.
5914# If set to 0, only try once, no retries.
5915#
5916# Related options:
5917#
5918# * api_retry_interval
5919# (integer value)
5920# Minimum value: 0
5921#api_max_retries = 60
5922
5923#
5924# The number of seconds to wait before retrying the request.
5925#
5926# Related options:
5927#
5928# * api_max_retries
5929# (integer value)
5930# Minimum value: 0
5931#api_retry_interval = 2
5932
5933# Timeout (seconds) to wait for node serial console state changed. Set
5934# to 0 to disable timeout. (integer value)
5935# Minimum value: 0
5936#serial_console_state_timeout = 10
5937
5938# PEM encoded Certificate Authority to use when verifying HTTPs
5939# connections. (string value)
5940#cafile = <None>
5941{%- if controller.ironic.get('protocol', 'http') == 'https' %}
5942cafile={{ controller.identity.get('cacert_file', controller.cacert_file) }}
5943{%- endif %}
5944
5945# PEM encoded client certificate cert file (string value)
5946#certfile = <None>
5947
5948# PEM encoded client certificate key file (string value)
5949#keyfile = <None>
5950
5951# Verify HTTPS connections. (boolean value)
5952#insecure = false
5953
5954# Timeout value for http requests (integer value)
5955#timeout = <None>
5956
5957# Authentication type to load (string value)
5958# Deprecated group/name - [ironic]/auth_plugin
5959#auth_type = <None>
5960auth_type={{ controller.ironic.auth_type }}
5961
5962# Config Section from which to load plugin specific options (string
5963# value)
5964#auth_section = <None>
5965
5966# Authentication URL (string value)
5967#auth_url = <None>
5968auth_url={{ controller.identity.get('protocol', 'http') }}://{{ controller.identity.host }}:{{ controller.identity.port }}/v3
5969
5970# Scope for system operations (string value)
5971#system_scope = <None>
5972
5973# Domain ID to scope to (string value)
5974#domain_id = <None>
5975
5976# Domain name to scope to (string value)
5977#domain_name = <None>
5978
5979# Project ID to scope to (string value)
5980#project_id = <None>
5981
5982# Project name to scope to (string value)
5983#project_name = <None>
5984project_name={{ controller.identity.tenant }}
5985
5986# Domain ID containing project (string value)
5987#project_domain_id = <None>
5988
5989# Domain name containing project (string value)
5990#project_domain_name = <None>
5991project_domain_name={{ controller.ironic.project_domain_name }}
5992
5993# Trust ID (string value)
5994#trust_id = <None>
5995
5996# User ID (string value)
5997#user_id = <None>
5998
5999# Username (string value)
6000# Deprecated group/name - [ironic]/user_name
6001#username = <None>
6002username={{ controller.ironic.user }}
6003
6004# User's domain id (string value)
6005#user_domain_id = <None>
6006
6007# User's domain name (string value)
6008#user_domain_name = <None>
6009user_domain_name={{ controller.ironic.user_domain_name }}
6010
6011
6012# User's password (string value)
6013#password = <None>
6014password={{ controller.ironic.password }}
6015# The default service_type for endpoint URL discovery. (string value)
6016#service_type = baremetal
6017
6018# The default service_name for endpoint URL discovery. (string value)
6019#service_name = <None>
6020
6021# List of interfaces, in order of preference, for endpoint URL. (list
6022# value)
6023#valid_interfaces = internal,public
6024
6025# The default region_name for endpoint URL discovery. (string value)
6026#region_name = <None>
6027
6028# Always use this endpoint URL for requests for this client. NOTE: The
6029# unversioned endpoint should be specified here; to request a
6030# particular API version, use the `version`, `min-version`, and/or
6031# `max-version` options. (string value)
6032# Deprecated group/name - [ironic]/api_endpoint
6033#endpoint_override = <None>
6034{%- endif %}
6035
6036
6037[key_manager]
6038
6039#
6040# From nova.conf
6041#
6042
6043#
6044# Fixed key returned by key manager, specified in hex.
6045#
6046# Possible values:
6047#
6048# * Empty string or a key in hex value
6049# (string value)
6050#fixed_key = <None>
6051{%- if controller.get('barbican', {}).get('enabled', False) %}
6052api_class=castellan.key_manager.barbican_key_manager.BarbicanKeyManager
6053{%- endif %}
6054
6055# Specify the key manager implementation. Options are "barbican" and
6056# "vault". Default is "barbican". Will support the values earlier
6057# set using [key_manager]/api_class for some time. (string value)
6058# Deprecated group/name - [key_manager]/api_class
6059#backend = barbican
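#
# Illustrative example (editor's addition, commented out). When Barbican
# is enabled above, the modern equivalent of the deprecated api_class
# setting would be:
#
# backend = barbican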
6060
6061# The type of authentication credential to create. Possible values are
6062# 'token', 'password', 'keystone_token', and 'keystone_password'.
6063# Required if no context is passed to the credential factory. (string
6064# value)
6065#auth_type = <None>
6066
6067# Token for authentication. Required for 'token' and 'keystone_token'
6068# auth_type if no context is passed to the credential factory. (string
6069# value)
6070#token = <None>
6071
6072# Username for authentication. Required for 'password' auth_type.
6073# Optional for the 'keystone_password' auth_type. (string value)
6074#username = <None>
6075
6076# Password for authentication. Required for 'password' and
6077# 'keystone_password' auth_type. (string value)
6078#password = <None>
6079
6080# Use this endpoint to connect to Keystone. (string value)
6081#auth_url = <None>
6082
6083# User ID for authentication. Optional for 'keystone_token' and
6084# 'keystone_password' auth_type. (string value)
6085#user_id = <None>
6086
6087# User's domain ID for authentication. Optional for 'keystone_token'
6088# and 'keystone_password' auth_type. (string value)
6089#user_domain_id = <None>
6090
6091# User's domain name for authentication. Optional for 'keystone_token'
6092# and 'keystone_password' auth_type. (string value)
6093#user_domain_name = <None>
6094
6095# Trust ID for trust scoping. Optional for 'keystone_token' and
6096# 'keystone_password' auth_type. (string value)
6097#trust_id = <None>
6098
6099# Domain ID for domain scoping. Optional for 'keystone_token' and
6100# 'keystone_password' auth_type. (string value)
6101#domain_id = <None>
6102
6103# Domain name for domain scoping. Optional for 'keystone_token' and
6104# 'keystone_password' auth_type. (string value)
6105#domain_name = <None>
6106
6107# Project ID for project scoping. Optional for 'keystone_token' and
6108# 'keystone_password' auth_type. (string value)
6109#project_id = <None>
6110
6111# Project name for project scoping. Optional for 'keystone_token' and
6112# 'keystone_password' auth_type. (string value)
6113#project_name = <None>
6114
6115# Project's domain ID for project. Optional for 'keystone_token' and
6116# 'keystone_password' auth_type. (string value)
6117#project_domain_id = <None>
6118
6119# Project's domain name for project. Optional for 'keystone_token' and
6120# 'keystone_password' auth_type. (string value)
6121#project_domain_name = <None>
6122
6123# Allow fetching a new token if the current one is going to expire.
6124# Optional for 'keystone_token' and 'keystone_password' auth_type.
6125# (boolean value)
6126#reauthenticate = true
6127
6128
6129[keystone]
6130# Configuration options for the identity service
6131
6132#
6133# From nova.conf
6134#
6135
6136# PEM encoded Certificate Authority to use when verifying HTTPs
6137# connections. (string value)
6138#cafile = <None>
6139
6140# PEM encoded client certificate cert file (string value)
6141#certfile = <None>
6142
6143# PEM encoded client certificate key file (string value)
6144#keyfile = <None>
6145
6146# Verify HTTPS connections. (boolean value)
6147#insecure = false
6148
6149# Timeout value for http requests (integer value)
6150#timeout = <None>
6151
6152# The default service_type for endpoint URL discovery. (string value)
6153#service_type = identity
6154
6155# The default service_name for endpoint URL discovery. (string value)
6156#service_name = <None>
6157
6158# List of interfaces, in order of preference, for endpoint URL. (list
6159# value)
6160#valid_interfaces = internal,public
6161
6162# The default region_name for endpoint URL discovery. (string value)
6163#region_name = <None>
6164
6165# Always use this endpoint URL for requests for this client. NOTE: The
6166# unversioned endpoint should be specified here; to request a
6167# particular API version, use the `version`, `min-version`, and/or
6168# `max-version` options. (string value)
6169#endpoint_override = <None>
6170
6171
6172[libvirt]
6173#
6174# Libvirt options allow the cloud administrator to configure the
6175# libvirt hypervisor driver to be used within an OpenStack deployment.
6176#
6177# Almost all of the libvirt config options are influenced by the
6178# ``virt_type`` config option,
6179# which describes the virtualization type (or so-called domain type)
6180# libvirt
6181# should use for specific features such as live migration and snapshots.
6182
6183#
6184# From nova.conf
6185#
6186virt_type = kvm
6187
6188inject_partition = -1
6189
6190#
6191# The ID of the image to boot from to rescue data from a corrupted
6192# instance.
6193#
6194# If the rescue REST API operation doesn't provide an ID of an image
6195# to
6196# use, the image which is referenced by this ID is used. If this
6197# option is not set, the image from the instance is used.
6198#
6199# Possible values:
6200#
6201# * An ID of an image or nothing. If it points to an *Amazon Machine
6202# Image* (AMI), consider to set the config options
6203# ``rescue_kernel_id``
6204# and ``rescue_ramdisk_id`` too. If nothing is set, the image of the
6205# instance
6206# is used.
6207#
6208# Related options:
6209#
6210# * ``rescue_kernel_id``: If the chosen rescue image allows the
6211# separate
6212# definition of its kernel disk, the value of this option is used,
6213# if specified. This is the case when *Amazon*'s AMI/AKI/ARI image
6214# format is used for the rescue image.
6215# * ``rescue_ramdisk_id``: If the chosen rescue image allows the
6216# separate
6217# definition of its RAM disk, the value of this option is used, if
6218# specified. This is the case when *Amazon*'s AMI/AKI/ARI image
6219# format is used for the rescue image.
6220# (string value)
6221#rescue_image_id = <None>
6222
6223#
6224# The ID of the kernel (AKI) image to use with the rescue image.
6225#
6226# If the chosen rescue image allows the separate definition of its
6227# kernel
6228# disk, the value of this option is used, if specified. This is the
6229# case
6230# when *Amazon*'s AMI/AKI/ARI image format is used for the rescue
6231# image.
6232#
6233# Possible values:
6234#
6235# * An ID of a kernel image or nothing. If nothing is specified, the
6236# kernel
6237# disk from the instance is used if it was launched with one.
6238#
6239# Related options:
6240#
6241# * ``rescue_image_id``: If that option points to an image in
6242# *Amazon*'s
6243# AMI/AKI/ARI image format, it's useful to use ``rescue_kernel_id``
6244# too.
6245# (string value)
6246#rescue_kernel_id = <None>
6247
6248#
6249# The ID of the RAM disk (ARI) image to use with the rescue image.
6250#
6251# If the chosen rescue image allows the separate definition of its RAM
6252# disk, the value of this option is used, if specified. This is the
6253# case
6254# when *Amazon*'s AMI/AKI/ARI image format is used for the rescue
6255# image.
6256#
6257# Possible values:
6258#
6259# * An ID of a RAM disk image or nothing. If nothing is specified, the
6260# RAM
6261# disk from the instance is used if it was launched with one.
6262#
6263# Related options:
6264#
6265# * ``rescue_image_id``: If that option points to an image in
6266# *Amazon*'s
6267# AMI/AKI/ARI image format, it's useful to use ``rescue_ramdisk_id``
6268# too.
6269# (string value)
6270#rescue_ramdisk_id = <None>
6271
6272#
6273# Describes the virtualization type (or so called domain type) libvirt
6274# should
6275# use.
6276#
6277# The choice of this type must match the underlying virtualization
6278# strategy
6279# you have chosen for this host.
6280#
6281# Possible values:
6282#
6283# * See the predefined set of case-sensitive values.
6284#
6285# Related options:
6286#
6287# * ``connection_uri``: depends on this
6288# * ``disk_prefix``: depends on this
6289# * ``cpu_mode``: depends on this
6290# * ``cpu_model``: depends on this
6291# (string value)
6292# Possible values:
6293# kvm - <No description provided>
6294# lxc - <No description provided>
6295# qemu - <No description provided>
6296# uml - <No description provided>
6297# xen - <No description provided>
6298# parallels - <No description provided>
6299#virt_type = kvm
6300
6301#
6302# Overrides the default libvirt URI of the chosen virtualization type.
6303#
6304# If set, Nova will use this URI to connect to libvirt.
6305#
6306# Possible values:
6307#
6308# * A URI like ``qemu:///system`` or ``xen+ssh://oirase/`` for
6309# example.
6310# This is only necessary if the URI differs from the commonly known
6311# URIs
6312# for the chosen virtualization type.
6313#
6314# Related options:
6315#
6316# * ``virt_type``: Influences what is used as default value here.
6317# (string value)
6318#connection_uri =
6319
6320#
6321# Algorithm used to hash the injected password.
6322# Note that it must be supported by libc on the compute host
6323# _and_ by libc inside *any guest image* that will be booted by this
6324# compute
6325# host with requested password injection.
6326# In case the specified algorithm is not supported by libc on the
6327# compute host,
6328# a fallback to the DES algorithm will be performed.
6329#
6330# Related options:
6331#
6332# * ``inject_password``
6333# * ``inject_partition``
6334# (string value)
6335# Possible values:
6336# SHA-512 - <No description provided>
6337# SHA-256 - <No description provided>
6338# MD5 - <No description provided>
6339#inject_password_algorithm = MD5
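#
# Illustrative example (editor's addition, commented out). A stronger hash
# than the MD5 default, provided libc on the host and inside the guest
# images supports it:
#
# inject_password_algorithm = SHA-512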
6340
6341#
6342# Allow the injection of an admin password for instance only at
6343# ``create`` and
6344# ``rebuild`` process.
6345#
6346# There is no agent needed within the image to do this. If
6347# *libguestfs* is
6348# available on the host, it will be used. Otherwise *nbd* is used. The
6349# file
6350# system of the image will be mounted and the admin password, which is
6351# provided
6352# in the REST API call will be injected as the password for the root user.
6353# If no
6354# root user is available, the instance won't be launched and an error
6355# is thrown.
6356# Be aware that the injection is *not* possible when the instance gets
6357# launched
6358# from a volume.
6359#
6360# Possible values:
6361#
6362# * True: Allows the injection.
6363# * False (default): Disallows the injection. Any admin password
6364# provided
6365# via the REST API will be silently ignored.
6366#
6367# Related options:
6368#
6369# * ``inject_partition``: That option decides about the discovery
6370# and usage
6371# of the file system. It can also disable the injection entirely.
6372# (boolean value)
6373#inject_password = false
6374
6375#
6376# Allow the injection of an SSH key at boot time.
6377#
6378# There is no agent needed within the image to do this. If
6379# *libguestfs* is
6380# available on the host, it will be used. Otherwise *nbd* is used. The
6381# file
6382# system of the image will be mounted and the SSH key, which is
6383# provided
6384# in the REST API call will be injected as the SSH key for the root user
6385# and
6386# appended to the ``authorized_keys`` of that user. The SELinux
6387# context will
6388# be set if necessary. Be aware that the injection is *not* possible
6389# when the
6390# instance gets launched from a volume.
6391#
6392# This config option will enable directly modifying the instance disk
6393# and does
6394# not affect what cloud-init may do using data from config_drive
6395# option or the
6396# metadata service.
6397#
6398# Related options:
6399#
6400# * ``inject_partition``: That option decides about the discovery
6401# and usage
6402# of the file system. It can also disable the injection entirely.
6403# (boolean value)
6404#inject_key = false
6405
6406#
6407# Determines how the file system is chosen to inject data into
6408# it.
6409#
6410# *libguestfs* will be used as the first solution to inject data. If that's
6411# not
6412# available on the host, the image will be locally mounted on the host
6413# as a
6414# fallback solution. If libguestfs is not able to determine the root
6415# partition
6416# (because there are more or less than one root partition) or cannot
6417# mount the
6418# file system, it will result in an error and the instance won't
6419# boot.
6420#
6421# Possible values:
6422#
6423# * -2 => disable the injection of data.
6424# * -1 => find the root partition with the file system to mount with
6425# libguestfs
6426# * 0 => The image is not partitioned
6427# * >0 => The number of the partition to use for the injection
6428#
6429# Related options:
6430#
6431# * ``inject_key``: If this option allows the injection of an SSH key,
6432# it depends
6433# on a value greater than or equal to -1 for ``inject_partition``.
6434# * ``inject_password``: If this option allows the injection of an
6435# admin password,
6436# it depends on a value greater than or equal to -1 for
6437# ``inject_partition``.
6438# * ``guestfs`` You can enable the debug log level of libguestfs with
6439# this
6440# config option. A more verbose output will help in debugging
6441# issues.
6442# * ``virt_type``: If you use ``lxc`` as virt_type it will be treated
6443# as a
6444# single partition image
6445# (integer value)
6446# Minimum value: -2
6447#inject_partition = -2
6448
6449# DEPRECATED:
6450# Enable a mouse cursor within graphical VNC or SPICE sessions.
6451#
6452# This will only be taken into account if the VM is fully virtualized
6453# and VNC
6454# and/or SPICE is enabled. If the node doesn't support a graphical
6455# framebuffer,
6456# then it is valid to set this to False.
6457#
6458# Related options:
6459# * ``[vnc]enabled``: If VNC is enabled, ``use_usb_tablet`` will have
6460# an effect.
6461# * ``[spice]enabled`` + ``[spice].agent_enabled``: If SPICE is
6462# enabled and the
6463# spice agent is disabled, the config value of ``use_usb_tablet``
6464# will have
6465# an effect.
6466# (boolean value)
6467# This option is deprecated for removal since 14.0.0.
6468# Its value may be silently ignored in the future.
6469# Reason: This option is being replaced by the 'pointer_model' option.
6470use_usb_tablet = true
6471
6472#
6473# The IP address or hostname to be used as the target for live
6474# migration traffic.
6475#
6476# If this option is set to None, the hostname of the migration target
6477# compute
6478# node will be used.
6479#
6480# This option is useful in environments where the live-migration
6481# traffic can
6482# impact the network plane significantly. A separate network for live-
6483# migration
6484# traffic can then use this config option and avoid the impact on the
6485# management network.
6486#
6487# Possible values:
6488#
6489# * A valid IP address or hostname, else None.
6490#
6491# Related options:
6492#
6493# * ``live_migration_tunnelled``: The live_migration_inbound_addr
6494# value is
6495# ignored if tunneling is enabled.
6496# (string value)
6497#live_migration_inbound_addr = <None>
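#
# Illustrative example (editor's addition, commented out; the address is
# made up). To keep live-migration traffic on a dedicated network, point
# this option at the host's address on that network:
#
# live_migration_inbound_addr = 10.10.20.11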
6498
6499# DEPRECATED:
6500# Live migration target URI to use.
6501#
6502# Override the default libvirt live migration target URI (which is
6503# dependent
6504# on virt_type). Any included "%s" is replaced with the migration
6505# target
6506# hostname.
6507#
6508# If this option is set to None (which is the default), Nova will
6509# automatically
6510# generate the `live_migration_uri` value based on only 4 supported
6511# `virt_type`
6512# in following list:
6513#
6514# * 'kvm': 'qemu+tcp://%s/system'
6515# * 'qemu': 'qemu+tcp://%s/system'
6516# * 'xen': 'xenmigr://%s/system'
6517# * 'parallels': 'parallels+tcp://%s/system'
6518#
6519# Related options:
6520#
6521# * ``live_migration_inbound_addr``: If
6522# ``live_migration_inbound_addr`` value
6523# is not None and ``live_migration_tunnelled`` is False, the
6524# ip/hostname
6525# address of target compute node is used instead of
6526# ``live_migration_uri`` as
6527# the uri for live migration.
6528# * ``live_migration_scheme``: If ``live_migration_uri`` is not set,
6529# the scheme
6530# used for live migration is taken from ``live_migration_scheme``
6531# instead.
6532# (string value)
6533# This option is deprecated for removal since 15.0.0.
6534# Its value may be silently ignored in the future.
6535# Reason:
6536# live_migration_uri is deprecated for removal in favor of two other
6537# options that
6538# allow changing the live migration scheme and target URI:
6539# ``live_migration_scheme``
6540# and ``live_migration_inbound_addr`` respectively.
6541#live_migration_uri = <None>
6542
6543#
6544# URI scheme used for live migration.
6545#
6546# Override the default libvirt live migration scheme (which is
6547# dependent on
6548# virt_type). If this option is set to None, nova will automatically
6549# choose a
6550# sensible default based on the hypervisor. It is not recommended that
6551# you change
6552# this unless you are very sure that the hypervisor supports a particular
6553# scheme.
6554#
6555# Related options:
6556#
6557# * ``virt_type``: This option is meaningful only when ``virt_type``
6558# is set to
6559# `kvm` or `qemu`.
6560# * ``live_migration_uri``: If ``live_migration_uri`` value is not
6561# None, the
6562# scheme used for live migration is taken from
6563# ``live_migration_uri`` instead.
6564# (string value)
6565#live_migration_scheme = <None>
6566
6567#
6568# Enable tunnelled migration.
6569#
6570# This option enables the tunnelled migration feature, where migration
6571# data is
6572# transported over the libvirtd connection. If enabled, we use the
6573# VIR_MIGRATE_TUNNELLED migration flag, avoiding the need to configure
6574# the network to allow direct hypervisor to hypervisor communication.
6575# If False, use the native transport. If not set, Nova will choose a
6576# sensible default based on, for example, the availability of native
6577# encryption support in the hypervisor. Enabling this option will
6578# definitely
6579# impact performance massively.
6580#
6581# Note that this option is NOT compatible with use of block migration.
6582#
6583# Related options:
6584#
6585# * ``live_migration_inbound_addr``: The live_migration_inbound_addr
6586# value is
6587# ignored if tunneling is enabled.
6588# (boolean value)
6589#live_migration_tunnelled = false
6590
6591#
6592# Maximum bandwidth(in MiB/s) to be used during migration.
6593#
6594# If set to 0, the hypervisor will choose a suitable default. Some
6595# hypervisors
6596# do not support this feature and will return an error if bandwidth is
6597# not 0.
6598# Please refer to the libvirt documentation for further details.
6599# (integer value)
6600#live_migration_bandwidth = 0
6601
6602#
6603# Maximum permitted downtime, in milliseconds, for live migration
6604# switchover.
6605#
6606# Will be rounded up to a minimum of 100ms. You can increase this
6607# value
6608# if you want to allow live-migrations to complete faster, or avoid
6609# live-migration timeout errors by allowing the guest to be paused for
6610# longer during the live-migration switch over.
6611#
6612# Related options:
6613#
6614# * live_migration_completion_timeout
6615# (integer value)
6616# Minimum value: 100
6617#live_migration_downtime = 500
6618
6619#
6620# Number of incremental steps to reach max downtime value.
6621#
6622# Will be rounded up to a minimum of 3 steps.
6623# (integer value)
6624# Minimum value: 3
6625#live_migration_downtime_steps = 10
6626
6627#
6628# Time to wait, in seconds, between each step increase of the
6629# migration
6630# downtime.
6631#
6632# Minimum delay is 3 seconds. Value is per GiB of guest RAM + disk to
6633# be
6634# transferred, with lower bound of a minimum of 2 GiB per device.
6635# (integer value)
6636# Minimum value: 3
6637#live_migration_downtime_delay = 75
6638
6639#
6640# Time to wait, in seconds, for migration to successfully complete
6641# transferring
6642# data before aborting the operation.
6643#
6644# Value is per GiB of guest RAM + disk to be transferred, with lower
6645# bound of
6646# a minimum of 2 GiB. Should usually be larger than downtime delay *
6647# downtime
6648# steps. Set to 0 to disable timeouts.
6649#
6650# Related options:
6651#
6652# * live_migration_downtime
6653# * live_migration_downtime_steps
6654# * live_migration_downtime_delay
6655# (integer value)
6656# Note: This option can be changed without restarting.
6657#live_migration_completion_timeout = 800
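#
# Illustrative example (editor's addition, commented out; values are
# assumptions). Allowing a longer switchover pause and a longer per-GiB
# completion timeout for large-memory guests might look like:
#
# live_migration_downtime = 1000
# live_migration_downtime_steps = 10
# live_migration_downtime_delay = 75
# live_migration_completion_timeout = 1600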
6658
6659# DEPRECATED:
6660# Time to wait, in seconds, for migration to make forward progress in
6661# transferring data before aborting the operation.
6662#
6663# Set to 0 to disable timeouts.
6664#
6665# This is deprecated, and now disabled by default because we have
6666# found serious
6667# bugs in this feature that caused false live-migration timeout
6668# failures. This
6669# feature will be removed or replaced in a future release.
6670# (integer value)
6671# Note: This option can be changed without restarting.
6672# This option is deprecated for removal.
6673# Its value may be silently ignored in the future.
6674# Reason: Serious bugs found in this feature.
6675#live_migration_progress_timeout = 0
6676
6677#
6678# This option allows nova to switch an on-going live migration to
6679# post-copy
6680# mode, i.e., switch the active VM to the one on the destination node
6681# before the
6682# migration is complete, therefore ensuring an upper bound on the
6683# memory that
6684# needs to be transferred. Post-copy requires libvirt>=1.3.3 and
6685# QEMU>=2.5.0.
6686#
6687# When permitted, post-copy mode will be automatically activated if a
6688# live-migration memory copy iteration does not make percentage
6689# increase of at
6690# least 10% over the last iteration.
6691#
6692# The live-migration force complete API also uses post-copy when
6693# permitted. If
6694# post-copy mode is not available, force complete falls back to
6695# pausing the VM
6696# to ensure the live-migration operation will complete.
6697#
6698# When using post-copy mode, if the source and destination hosts lose
6699# network
6700# connectivity, the VM being live-migrated will need to be rebooted.
6701# For more
6702# details, please see the Administration guide.
6703#
6704# Related options:
6705#
6706# * live_migration_permit_auto_converge
6707# (boolean value)
6708#live_migration_permit_post_copy = false
6709
6710#
6711# This option allows nova to start live migration with auto converge
6712# on.
6713#
6714# Auto converge throttles down CPU if the progress of an on-going live
6715# migration
6716# is slow. Auto converge will only be used if this flag is set to True
6717# and
6718# post copy is not permitted or post copy is unavailable due to the
6719# version
6720# of libvirt and QEMU in use.
6721#
6722# Related options:
6723#
6724# * live_migration_permit_post_copy
6725# (boolean value)
6726#live_migration_permit_auto_converge = false
6727{%- if controller.get('libvirt', {}).live_migration_permit_auto_converge is defined %}
6728live_migration_permit_auto_converge={{ controller.libvirt.live_migration_permit_auto_converge|lower }}
6729{%- endif %}
6730
6731#
6732# Determine the snapshot image format when sending to the image
6733# service.
6734#
6735# If set, this decides what format is used when sending the snapshot
6736# to the
6737# image service.
6738# If not set, defaults to same type as source image.
6739#
6740# Possible values:
6741#
6742# * ``raw``: RAW disk format
6743# * ``qcow2``: KVM default disk format
6744# * ``vmdk``: VMWare default disk format
6745# * ``vdi``: VirtualBox default disk format
6746# * If not set, defaults to same type as source image.
6747# (string value)
6748# Possible values:
6749# raw - <No description provided>
6750# qcow2 - <No description provided>
6751# vmdk - <No description provided>
6752# vdi - <No description provided>
6753#snapshot_image_format = <None>
6754
6755#
6756# Override the default disk prefix for the devices attached to an
6757# instance.
6758#
6759# If set, this is used to identify a free disk device name for a bus.
6760#
6761# Possible values:
6762#
6763# * Any prefix which will result in a valid disk device name like
6764# 'sda' or 'hda'
6765# for example. This is only necessary if the device names differ from
6766# the
6767# commonly known device name prefixes for a virtualization type such
6768# as: sd,
6769# xvd, uvd, vd.
6770#
6771# Related options:
6772#
6773# * ``virt_type``: Influences which device type is used, which
6774# determines
6775# the default disk prefix.
6776# (string value)
6777#disk_prefix = <None>
6778
6779# Number of seconds to wait for instance to shut down after soft
6780# reboot request is made. We fall back to hard reboot if instance does
6781# not shutdown within this window. (integer value)
6782#wait_soft_reboot_seconds = 120
6783
6784#
6785# Is used to set the CPU mode an instance should have.
6786#
6787# If virt_type="kvm|qemu", it will default to "host-model", otherwise
6788# it will
6789# default to "none".
6790#
6791# Possible values:
6792#
6793# * ``host-model``: Clones the host CPU feature flags
6794# * ``host-passthrough``: Use the host CPU model exactly
6795# * ``custom``: Use a named CPU model
6796# * ``none``: Don't set a specific CPU model. For instances with
6797# ``virt_type`` as KVM/QEMU, the default CPU model from QEMU will be
6798# used,
6799# which provides a basic set of CPU features that are compatible with
6800# most
6801# hosts.
6802#
6803# Related options:
6804#
6805# * ``cpu_model``: This should be set ONLY when ``cpu_mode`` is set to
6806# ``custom``. Otherwise, it would result in an error and the instance
6807# launch will fail.
6808#
6809# (string value)
6810# Possible values:
6811# host-model - <No description provided>
6812# host-passthrough - <No description provided>
6813# custom - <No description provided>
6814# none - <No description provided>
#cpu_mode = <None>
6816{%- if controller.cpu_mode is defined %}
6817cpu_mode = {{ controller.cpu_mode }}
6818{%- endif %}
6820#
6821# Set the name of the libvirt CPU model the instance should use.
6822#
6823# Possible values:
6824#
6825# * The named CPU models listed in ``/usr/share/libvirt/cpu_map.xml``
6826#
6827# Related options:
6828#
6829# * ``cpu_mode``: This should be set to ``custom`` ONLY when you want
6830# to
6831# configure (via ``cpu_model``) a specific named CPU model.
6832# Otherwise, it
6833# would result in an error and the instance launch will fail.
6834#
6835# * ``virt_type``: Only the virtualization types ``kvm`` and ``qemu``
6836# use this.
6837# (string value)
6838#cpu_model = <None>
{%- if controller.get('libvirt', {}).cpu_model is defined and controller.cpu_mode == 'custom' %}
6840cpu_model = {{ controller.libvirt.cpu_model }}
6841{%- endif %}
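# Illustrative example (hypothetical pillar values, assuming the usual
# nova:controller pillar mapping): a pillar such as
#
#   nova:
#     controller:
#       cpu_mode: custom
#       libvirt:
#         cpu_model: IvyBridge
#
# would make the two blocks above render:
#
#   cpu_mode = custom
#   cpu_model = IvyBridge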
6842
6844#
6845# This allows specifying granular CPU feature flags when specifying
6846# CPU
6847# models. For example, to explicitly specify the ``pcid``
6848# (Process-Context ID, an Intel processor feature) flag to the
6849# "IvyBridge"
6850# virtual CPU model::
6851#
6852# [libvirt]
6853# cpu_mode = custom
6854# cpu_model = IvyBridge
6855# cpu_model_extra_flags = pcid
6856#
6857# Currently, the choice is restricted to only one option: ``pcid``
6858# (the
6859# option is case-insensitive, so ``PCID`` is also valid). This flag
6860# is
6861# now required to address the guest performance degradation as a
6862# result of
6863# applying the "Meltdown" CVE fixes on certain Intel CPU models.
6864#
6865# Note that when using this config attribute to set the 'PCID' CPU
6866# flag,
6867# not all virtual (i.e. libvirt / QEMU) CPU models need it:
6868#
6869# * The only virtual CPU models that include the 'PCID' capability are
6870# Intel "Haswell", "Broadwell", and "Skylake" variants.
6871#
6872# * The libvirt / QEMU CPU models "Nehalem", "Westmere",
6873# "SandyBridge",
6874# and "IvyBridge" will _not_ expose the 'PCID' capability by
6875# default,
6876# even if the host CPUs by the same name include it. I.e. 'PCID'
6877# needs
6878# to be explicitly specified when using the said virtual CPU models.
6879#
6880# For now, the ``cpu_model_extra_flags`` config attribute is valid
6881# only in
6882# combination with ``cpu_mode`` + ``cpu_model`` options.
6883#
6884# Besides ``custom``, the libvirt driver has two other CPU modes: The
6885# default, ``host-model``, tells it to do the right thing with respect
6886# to
6887# handling 'PCID' CPU flag for the guest -- *assuming* you are running
6888# updated processor microcode, host and guest kernel, libvirt, and
6889# QEMU.
6890# The other mode, ``host-passthrough``, checks if 'PCID' is available
6891# in
6892# the hardware, and if so directly passes it through to the Nova
6893# guests.
6894# Thus, in context of 'PCID', with either of these CPU modes
6895# (``host-model`` or ``host-passthrough``), there is no need to use
6896# the
6897# ``cpu_model_extra_flags``.
6898#
6899# Related options:
6900#
6901# * cpu_mode
6902# * cpu_model
6903# (list value)
6904#cpu_model_extra_flags =
6905
6906# Location where libvirt driver will store snapshots before uploading
6907# them to image service (string value)
6908#snapshots_directory = $instances_path/snapshots
6909
6910# Location where the Xen hvmloader is kept (string value)
6911#xen_hvmloader_path = /usr/lib/xen/boot/hvmloader
6912
6913#
6914# Specific cache modes to use for different disk types.
6915#
6916# For example: file=directsync,block=none,network=writeback
6917#
6918# For local or direct-attached storage, it is recommended that you use
6919# writethrough (default) mode, as it ensures data integrity and has
6920# acceptable
6921# I/O performance for applications running in the guest, especially
6922# for read
6923# operations. However, caching mode none is recommended for remote NFS
6924# storage,
6925# because direct I/O operations (O_DIRECT) perform better than
6926# synchronous I/O
6927# operations (with O_SYNC). Caching mode none effectively turns all
6928# guest I/O
6929# operations into direct I/O operations on the host, which is the NFS
6930# client in
6931# this environment.
6932#
6933# Possible cache modes:
6934#
6935# * default: Same as writethrough.
6936# * none: With caching mode set to none, the host page cache is
6937# disabled, but
6938# the disk write cache is enabled for the guest. In this mode, the
6939# write
6940# performance in the guest is optimal because write operations
6941# bypass the host
6942# page cache and go directly to the disk write cache. If the disk
6943# write cache
6944# is battery-backed, or if the applications or storage stack in the
6945# guest
6946# transfer data properly (either through fsync operations or file
6947# system
6948# barriers), then data integrity can be ensured. However, because
6949# the host
6950# page cache is disabled, the read performance in the guest would
6951# not be as
6952# good as in the modes where the host page cache is enabled, such as
6953# writethrough mode. Shareable disk devices, like for a multi-
6954# attachable block
6955# storage volume, will have their cache mode set to 'none'
6956# regardless of
6957# configuration.
6958# * writethrough: writethrough mode is the default caching mode. With
6959# caching set to writethrough mode, the host page cache is enabled,
6960# but the
6961# disk write cache is disabled for the guest. Consequently, this
6962# caching mode
6963# ensures data integrity even if the applications and storage stack
6964# in the
6965# guest do not transfer data to permanent storage properly (either
6966# through
6967# fsync operations or file system barriers). Because the host page
6968# cache is
6969# enabled in this mode, the read performance for applications
6970# running in the
6971# guest is generally better. However, the write performance might be
6972# reduced
6973# because the disk write cache is disabled.
6974# * writeback: With caching set to writeback mode, both the host page
6975# cache
6976# and the disk write cache are enabled for the guest. Because of
6977# this, the
6978# I/O performance for applications running in the guest is good, but
6979# the data
6980# is not protected in a power failure. As a result, this caching
6981# mode is
6982# recommended only for temporary data where potential data loss is
6983# not a
6984# concern.
6985# * directsync: Like "writethrough", but it bypasses the host page
6986# cache.
6987# * unsafe: Caching mode of unsafe ignores cache transfer operations
6988# completely. As its name implies, this caching mode should be used
6989# only for
6990# temporary data where data loss is not a concern. This mode can be
6991# useful for
6992# speeding up guest installations, but you should switch to another
6993# caching
6994# mode in production environments.
6995# (list value)
6996#disk_cachemodes =
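# Illustrative example of the format described above: following the NFS
# recommendation in this description, one might set (hypothetical choice,
# adjust to your storage):
#
#   disk_cachemodes = file=none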
6997
6998# A path to a device that will be used as source of entropy on the
6999# host. Permitted options are: /dev/random or /dev/hwrng (string
7000# value)
7001#rng_dev_path = <None>
7002
7003# For qemu or KVM guests, set this option to specify a default machine
7004# type per host architecture. You can find a list of supported machine
7005# types in your environment by checking the output of the "virsh
7006# capabilities" command. The format of the value for this config option
7007# is host-arch=machine-type. For example:
7008# x86_64=machinetype1,armv7l=machinetype2 (list value)
7009#hw_machine_type = <None>
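# Illustrative example in the host-arch=machine-type format described above
# (the machine type name is hypothetical; check "virsh capabilities" on your
# hosts):
#
#   hw_machine_type = x86_64=pc-i440fx-2.11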
7010
7011# The data source used to populate the host "serial" UUID exposed
7012# to the guest in the virtual BIOS. (string value)
7013# Possible values:
7014# none - <No description provided>
7015# os - <No description provided>
7016# hardware - <No description provided>
7017# auto - <No description provided>
7018#sysinfo_serial = auto
7019
7020# Number of seconds between memory usage statistics updates. A zero or
7021# negative value disables memory usage statistics. (integer
7022# value)
7023#mem_stats_period_seconds = 10
7024
7025# List of uid targets and ranges. Syntax is guest-uid:host-
7026# uid:count. Maximum of 5 allowed. (list value)
7027#uid_maps =
7028
7029# List of gid targets and ranges. Syntax is guest-gid:host-
7030# gid:count. Maximum of 5 allowed. (list value)
7031#gid_maps =
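# Illustrative example following the syntax documented above (IDs and counts
# are hypothetical):
#
#   uid_maps = 0:107:1,1:100000:65534
#   gid_maps = 0:107:1,1:100000:65534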
7032
7033# In a realtime host context, vCPUs for the guest will run at this
7034# scheduling priority. The priority range depends on the host kernel
7035# (usually 1-99). (integer value)
7036#realtime_scheduler_priority = 1
7037
7038#
7039# This is a list of performance events which can be monitored.
7040# These events
7041# will be passed to the libvirt domain XML when creating new instances.
7042# Then event statistics data can be collected from libvirt. The
7043# minimum
7044# libvirt version is 2.0.0. For more information about `Performance
7045# monitoring
7046# events`, refer https://libvirt.org/formatdomain.html#elementsPerf .
7047#
7048# Possible values:
7049# * A string list. For example: ``enabled_perf_events = cmt, mbml,
7050# mbmt``
7051# The supported events list can be found in
7052# https://libvirt.org/html/libvirt-libvirt-domain.html ,
7053# which you may need to search key words ``VIR_PERF_PARAM_*``
7054# (list value)
7055#enabled_perf_events =
7056
7057#
7058# VM Images format.
7059#
7060# If default is specified, then use_cow_images flag is used instead of
7061# this
7062# one.
7063#
7064# Related options:
7065#
7066# * virt.use_cow_images
7067# * images_volume_group
7068# (string value)
7069# Possible values:
7070# raw - <No description provided>
7071# flat - <No description provided>
7072# qcow2 - <No description provided>
7073# lvm - <No description provided>
7074# rbd - <No description provided>
7075# ploop - <No description provided>
7076# default - <No description provided>
7077#images_type = default
7078
7079#
7080# LVM Volume Group that is used for VM images, when you specify
7081# images_type=lvm
7082#
7083# Related options:
7084#
7085# * images_type
7086# (string value)
7087#images_volume_group = <None>
7088
7089#
7090# Create sparse logical volumes (with virtualsize) if this flag is set
7091# to True.
7092# (boolean value)
7093#sparse_logical_volumes = false
7094
7095# The RADOS pool in which rbd volumes are stored (string value)
7096#images_rbd_pool = rbd
7097
7098# Path to the ceph configuration file to use (string value)
7099#images_rbd_ceph_conf =
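# Illustrative example (hypothetical pool name and path): to back instance
# disks with Ceph RBD, one might set:
#
#   images_type = rbd
#   images_rbd_pool = vms
#   images_rbd_ceph_conf = /etc/ceph/ceph.conf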
7100
7101#
7102# Discard option for nova managed disks.
7103#
7104# Requires:
7105#
7106# * Libvirt >= 1.0.6
7107# * Qemu >= 1.5 (raw format)
7108# * Qemu >= 1.6 (qcow2 format)
7109# (string value)
7110# Possible values:
7111# ignore - <No description provided>
7112# unmap - <No description provided>
7113#hw_disk_discard = <None>
7114
7115# DEPRECATED: Allows image information files to be stored in non-
7116# standard locations (string value)
7117# This option is deprecated for removal since 14.0.0.
7118# Its value may be silently ignored in the future.
7119# Reason: Image info files are no longer used by the image cache
7120#image_info_filename_pattern = $instances_path/$image_cache_subdirectory_name/%(image)s.info
7121
7122# Unused resized base images younger than this will not be removed
7123# (integer value)
7124#remove_unused_resized_minimum_age_seconds = 3600
7125
7126# DEPRECATED: Write a checksum for files in _base to disk (boolean
7127# value)
7128# This option is deprecated for removal since 14.0.0.
7129# Its value may be silently ignored in the future.
7130# Reason: The image cache no longer periodically calculates checksums
7131# of stored images. Data integrity can be checked at the block or
7132# filesystem level.
7133#checksum_base_images = false
7134
7135# DEPRECATED: How frequently to checksum base images (integer value)
7136# This option is deprecated for removal since 14.0.0.
7137# Its value may be silently ignored in the future.
7138# Reason: The image cache no longer periodically calculates checksums
7139# of stored images. Data integrity can be checked at the block or
7140# filesystem level.
7141#checksum_interval_seconds = 3600
7142
7143#
7144# Method used to wipe ephemeral disks when they are deleted. Only
7145# takes effect
7146# if LVM is set as backing storage.
7147#
7148# Possible values:
7149#
7150# * none - do not wipe deleted volumes
7151# * zero - overwrite volumes with zeroes
7152# * shred - overwrite volume repeatedly
7153#
7154# Related options:
7155#
7156# * images_type - must be set to ``lvm``
7157# * volume_clear_size
7158# (string value)
7159# Possible values:
7160# none - <No description provided>
7161# zero - <No description provided>
7162# shred - <No description provided>
7163#volume_clear = zero
7164
7165#
7166# Size of area in MiB, counting from the beginning of the allocated
7167# volume,
7168# that will be cleared using method set in ``volume_clear`` option.
7169#
7170# Possible values:
7171#
7172# * 0 - clear whole volume
7173# * >0 - clear specified amount of MiB
7174#
7175# Related options:
7176#
7177# * images_type - must be set to ``lvm``
7178# * volume_clear - must be set and the value must be different than
7179# ``none``
7180# for this option to have any impact
7181# (integer value)
7182# Minimum value: 0
7183#volume_clear_size = 0
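# Illustrative example (hypothetical sizing): to wipe only the first 100 MiB
# of deleted LVM-backed ephemeral disks, one might set:
#
#   images_type = lvm
#   volume_clear = zero
#   volume_clear_size = 100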
7184
7185#
7186# Enable snapshot compression for ``qcow2`` images.
7187#
7188# Note: you can set ``snapshot_image_format`` to ``qcow2`` to force
7189# all
7190# snapshots to be in ``qcow2`` format, independently from their
7191# original image
7192# type.
7193#
7194# Related options:
7195#
7196# * snapshot_image_format
7197# (boolean value)
7198#snapshot_compression = false
7199
7200# Use virtio for bridge interfaces with KVM/QEMU (boolean value)
7201use_virtio_for_bridges = true
7202
7203#
7204# Use multipath connection of the iSCSI or FC volume
7205#
7206# Volumes can be connected in libvirt as multipath devices. This
7207# will
7208# provide high availability and fault tolerance.
7209# (boolean value)
7210# Deprecated group/name - [libvirt]/iscsi_use_multipath
7211#volume_use_multipath = false
7212
7213#
7214# Number of times to scan given storage protocol to find volume.
7215# (integer value)
7216# Deprecated group/name - [libvirt]/num_iscsi_scan_tries
7217#num_volume_scan_tries = 5
7218
7219#
7220# Number of times to rediscover AoE target to find volume.
7221#
7222# Nova provides support for block storage attaching to hosts via AOE
7223# (ATA over
7224# Ethernet). This option allows the user to specify the maximum number
7225# of retry
7226# attempts that can be made to discover the AoE device.
7227# (integer value)
7228#num_aoe_discover_tries = 3
7229
7230#
7231# The iSCSI transport iface to use to connect to target in case
7232# offload support
7233# is desired.
7234#
7235# Default format is of the form <transport_name>.<hwaddress> where
7236# <transport_name> is one of (be2iscsi, bnx2i, cxgb3i, cxgb4i,
7237# qla4xxx, ocs) and
7238# <hwaddress> is the MAC address of the interface and can be generated
7239# via the
7240# iscsiadm -m iface command. Do not confuse the iscsi_iface parameter
7241# to be
7242# provided here with the actual transport name.
7243# (string value)
7244# Deprecated group/name - [libvirt]/iscsi_transport
7245#iscsi_iface = <None>
7246
7247#
7248# Number of times to scan iSER target to find volume.
7249#
7250# iSER is a server network protocol that extends iSCSI protocol to use
7251# Remote
7252# Direct Memory Access (RDMA). This option allows the user to specify
7253# the maximum
7254# number of scan attempts that can be made to find iSER volume.
7255# (integer value)
7256#num_iser_scan_tries = 5
7257
7258#
7259# Use multipath connection of the iSER volume.
7260#
7261# iSER volumes can be connected as multipath devices. This will
7262# provide high
7263# availability and fault tolerance.
7264# (boolean value)
7265#iser_use_multipath = false
7266
7267#
7268# The RADOS client name for accessing rbd (RADOS Block Devices)
7269# volumes.
7270#
7271# Libvirt will refer to this user when connecting and authenticating
7272# with
7273# the Ceph RBD server.
7274# (string value)
7275#rbd_user = <None>
7276
7277#
7278# The libvirt UUID of the secret for the rbd_user volumes.
7279# (string value)
7280#rbd_secret_uuid = <None>
7281
7282#
7283# Directory where the NFS volume is mounted on the compute node.
7284# The default is the 'mnt' directory of the location where nova's Python
7285# module
7286# is installed.
7287#
7288# NFS provides shared storage for the OpenStack Block Storage service.
7289#
7290# Possible values:
7291#
7292# * A string representing absolute path of mount point.
7293# (string value)
7294#nfs_mount_point_base = $state_path/mnt
7295
7296#
7297# Mount options passed to the NFS client. See the nfs man
7298# page
7299# for details.
7300#
7301# Mount options control the way the filesystem is mounted and how the
7302# NFS client behaves when accessing files on this mount point.
7303#
7304# Possible values:
7305#
7306# * Any string representing mount options separated by commas.
7307# * Example string: vers=3,lookupcache=pos
7308# (string value)
{%- if controller.nfs_mount_options is defined %}
7310nfs_mount_options="{{ controller.nfs_mount_options }}"
7311{%- endif %}
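# Illustrative example (hypothetical pillar value): setting
# nova:controller:nfs_mount_options to "vers=3,lookupcache=pos" would make
# the block above render:
#
#   nfs_mount_options="vers=3,lookupcache=pos"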
7313#
7314# Directory where the Quobyte volume is mounted on the compute node.
7315#
7316# Nova supports Quobyte volume driver that enables storing Block
7317# Storage
7318# service volumes on a Quobyte storage back end. This option specifies
7319# the
7320# path of the directory where the Quobyte volume is mounted.
7321#
7322# Possible values:
7323#
7324# * A string representing absolute path of mount point.
7325# (string value)
7326#quobyte_mount_point_base = $state_path/mnt
7327
7328# Path to a Quobyte Client configuration file. (string value)
7329#quobyte_client_cfg = <None>
7330
7331#
7332# Directory where the SMBFS shares are mounted on the compute node.
7333# (string value)
7334#smbfs_mount_point_base = $state_path/mnt
7335
7336#
7337# Mount options passed to the SMBFS client.
7338#
7339# Provide SMBFS options as a single string containing all parameters.
7340# See mount.cifs man page for details. Note that the libvirt-qemu
7341# ``uid``
7342# and ``gid`` must be specified.
7343# (string value)
7344#smbfs_mount_options =
7345
7346#
7347# libvirt's transport method for remote file operations.
7348#
7349# Because libvirt cannot use RPC to copy files over network to/from
7350# other
7351# compute nodes, another method must be used for:
7352#
7353# * creating directory on remote host
7354# * creating file on remote host
7355# * removing file from remote host
7356# * copying file to remote host
7357# (string value)
7358# Possible values:
7359# ssh - <No description provided>
7360# rsync - <No description provided>
7361#remote_filesystem_transport = ssh
7362
7363#
7364# Directory where the Virtuozzo Storage clusters are mounted on the
7365# compute
7366# node.
7367#
7368# This option defines non-standard mountpoint for Vzstorage cluster.
7369#
7370# Related options:
7371#
7372# * vzstorage_mount_* group of parameters
7373# (string value)
7374#vzstorage_mount_point_base = $state_path/mnt
7375
7376#
7377# Mount owner user name.
7378#
7379# This option defines the owner user of Vzstorage cluster mountpoint.
7380#
7381# Related options:
7382#
7383# * vzstorage_mount_* group of parameters
7384# (string value)
7385#vzstorage_mount_user = stack
7386
7387#
7388# Mount owner group name.
7389#
7390# This option defines the owner group of Vzstorage cluster mountpoint.
7391#
7392# Related options:
7393#
7394# * vzstorage_mount_* group of parameters
7395# (string value)
7396#vzstorage_mount_group = qemu
7397
7398#
7399# Mount access mode.
7400#
7401# This option defines the access bits of Vzstorage cluster mountpoint,
7402# in the format similar to one of chmod(1) utility, like this: 0770.
7403# It consists of one to four digits ranging from 0 to 7, with missing
7404# leading digits assumed to be 0's.
7405#
7406# Related options:
7407#
7408# * vzstorage_mount_* group of parameters
7409# (string value)
7410#vzstorage_mount_perms = 0770
7411
7412#
7413# Path to vzstorage client log.
7414#
7415# This option defines the log of cluster operations,
7416# it should include "%(cluster_name)s" template to separate
7417# logs from multiple shares.
7418#
7419# Related options:
7420#
7421# * vzstorage_mount_opts may include more detailed logging options.
7422# (string value)
7423#vzstorage_log_path = /var/log/vstorage/%(cluster_name)s/nova.log.gz
7424
7425#
7426# Path to the SSD cache file.
7427#
7428# You can attach an SSD drive to a client and configure the drive to
7429# store
7430# a local cache of frequently accessed data. By having a local cache
7431# on a
7432# client's SSD drive, you can increase the overall cluster performance
7433# by
7434# up to 10 times or more.
7435# WARNING! There are many SSD models which are not server grade and
7436# may lose an arbitrary set of data changes on power loss.
7437# Such SSDs should not be used in Vstorage and are dangerous as they
7438# may lead
7439# to data corruption and inconsistencies. Please consult the
7440# manual
7441# on which SSD models are known to be safe, or verify this using the
7442# vstorage-hwflush-check(1) utility.
7443#
7444# This option defines the path which should include "%(cluster_name)s"
7445# template to separate caches from multiple shares.
7446#
7447# Related options:
7448#
7449# * vzstorage_mount_opts may include more detailed cache options.
7450# (string value)
7451#vzstorage_cache_path = <None>
7452
7453#
7454# Extra mount options for pstorage-mount
7455#
7456# For full description of them, see
7457# https://static.openvz.org/vz-man/man1/pstorage-mount.1.gz.html
7458# Format is a python string representation of arguments list, like:
7459# "['-v', '-R', '500']"
7460# Shouldn't include -c, -l, -C, -u, -g and -m as those have
7461# explicit vzstorage_* options.
7462#
7463# Related options:
7464#
7465# * All other vzstorage_* options
7466# (list value)
7467#vzstorage_mount_opts =
7468
7469
7470[metrics]
7471#
7472# Configuration options for metrics
7473#
7474# Options under this group allow adjusting how values assigned to
7475# metrics are
7476# calculated.
7477
7478#
7479# From nova.conf
7480#
7481
7482#
7483# When using metrics to weight the suitability of a host, you can use
7484# this option
7485# to change how the calculated weight influences the weight assigned
7486# to a host as
7487# follows:
7488#
7489# * >1.0: increases the effect of the metric on overall weight
7490# * 1.0: no change to the calculated weight
7491# * >0.0,<1.0: reduces the effect of the metric on overall weight
7492# * 0.0: the metric value is ignored, and the value of the
7493# 'weight_of_unavailable' option is returned instead
7494# * >-1.0,<0.0: the effect is reduced and reversed
7495# * -1.0: the effect is reversed
7496# * <-1.0: the effect is increased proportionally and reversed
7497#
7498# This option is only used by the FilterScheduler and its subclasses;
7499# if you use
7500# a different scheduler, this option has no effect.
7501#
7502# Possible values:
7503#
7504# * An integer or float value, where the value corresponds to the
7505# multiplier
7506# ratio for this weigher.
7507#
7508# Related options:
7509#
7510# * weight_of_unavailable
7511# (floating point value)
7512#weight_multiplier = 1.0
7513
7514#
7515# This setting specifies the metrics to be weighed and the relative
7516# ratios for
7517# each metric. This should be a single string value, consisting of a
7518# series of
7519# one or more 'name=ratio' pairs, separated by commas, where 'name' is
7520# the name
7521# of the metric to be weighed, and 'ratio' is the relative weight for
7522# that
7523# metric.
7524#
7525# Note that if the ratio is set to 0, the metric value is ignored, and
7526# instead
7527# the weight will be set to the value of the 'weight_of_unavailable'
7528# option.
7529#
7530# As an example, let's consider the case where this option is set to:
7531#
7532# ``name1=1.0, name2=-1.3``
7533#
7534# The final weight will be:
7535#
7536# ``(name1.value * 1.0) + (name2.value * -1.3)``
7537#
7538# This option is only used by the FilterScheduler and its subclasses;
7539# if you use
7540# a different scheduler, this option has no effect.
7541#
7542# Possible values:
7543#
7544# * A list of zero or more key/value pairs separated by commas, where
7545# the key is
7546# a string representing the name of a metric and the value is a
7547# numeric weight
7548# for that metric. If any value is set to 0, the value is ignored
7549# and the
7550# weight will be set to the value of the 'weight_of_unavailable'
7551# option.
7552#
7553# Related options:
7554#
7555# * weight_of_unavailable
7556# (list value)
7557#weight_setting =
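# Worked example (illustrative metric names and numbers only): with
#
#   weight_setting = cpu.frequency=1.0, mem.usage=-1.3
#
# a host reporting cpu.frequency=2000 and mem.usage=100 would get a raw
# metrics weight of (2000 * 1.0) + (100 * -1.3) = 1870; weight_multiplier
# then controls how strongly this contributes to the overall host weight.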
7558
7559#
7560# This setting determines how any unavailable metrics are treated. If
7561# this option
7562# is set to True, any hosts for which a metric is unavailable will
7563# raise an
7564# exception, so it is recommended to also use the MetricFilter to
7565# filter out
7566# those hosts before weighing.
7567#
7568# This option is only used by the FilterScheduler and its subclasses;
7569# if you use
7570# a different scheduler, this option has no effect.
7571#
7572# Possible values:
7573#
7574# * True or False, where False ensures any metric being unavailable
7575# for a host
7576# will set the host weight to 'weight_of_unavailable'.
7577#
7578# Related options:
7579#
7580# * weight_of_unavailable
7581# (boolean value)
7582#required = true
7583
7584#
7585# When any of the following conditions are met, this value will be
7586# used in place
7587# of any actual metric value:
7588#
7589# * One of the metrics named in 'weight_setting' is not available for
7590# a host,
7591# and the value of 'required' is False
7592# * The ratio specified for a metric in 'weight_setting' is 0
7593# * The 'weight_multiplier' option is set to 0
7594#
7595# This option is only used by the FilterScheduler and its subclasses;
7596# if you use
7597# a different scheduler, this option has no effect.
7598#
7599# Possible values:
7600#
7601# * An integer or float value, where the value corresponds to the
7602# multiplier
7603# ratio for this weigher.
7604#
7605# Related options:
7606#
7607# * weight_setting
7608# * required
7609# * weight_multiplier
7610# (floating point value)
7611#weight_of_unavailable = -10000.0
7612
7613
7614[mks]
7615#
7616# Nova compute node uses WebMKS, a desktop sharing protocol to provide
7617# instance console access to VMs created by VMware hypervisors.
7618#
7619# Related options:
7620# Following options must be set to provide console access.
7621# * mksproxy_base_url
7622# * enabled
7623
7624#
7625# From nova.conf
7626#
7627
7628#
7629# Location of MKS web console proxy
7630#
7631# The URL in the response points to a WebMKS proxy which
7632# starts proxying between the client and the corresponding vCenter
7633# server where the instance runs. In order to use the web based
7634# console access, the WebMKS proxy should be installed and configured.
7635#
7636# Possible values:
7637#
7638# * Must be a valid URL of the form:``http://host:port/`` or
7639# ``https://host:port/``
7640# (uri value)
7641#mksproxy_base_url = http://127.0.0.1:6090/
7642
7643#
7644# Enables graphical console access for virtual machines.
7645# (boolean value)
7646#enabled = false
7647
7648
7649[neutron]
7650#
7651# Configuration options for neutron (network connectivity as a
7652# service).
7653
7654#
7655# From nova.conf
7656#
7657
7658# DEPRECATED:
7659# This option specifies the URL for connecting to Neutron.
7660#
7661# Possible values:
7662#
7663# * Any valid URL that points to the Neutron API service is
7664# appropriate here.
7665# This typically matches the URL returned for the 'network' service
7666# type
7667# from the Keystone service catalog.
7668# (uri value)
7669# This option is deprecated for removal since 17.0.0.
7670# Its value may be silently ignored in the future.
7671# Reason: Endpoint lookup uses the service catalog via common
7672# keystoneauth1 Adapter configuration options. In the current release,
7673# "url" will override this behavior, but will be ignored and/or
7674# removed in a future release. To achieve the same result, use the
7675# endpoint_override option instead.
7676#url = http://127.0.0.1:9696
7677
7678#
7679# Default name for the Open vSwitch integration bridge.
7680#
7681# Specifies the name of an integration bridge interface used by
7682# OpenvSwitch.
7683# This option is only used if Neutron does not specify the OVS bridge
7684# name in
7685# port binding responses.
7686# (string value)
7687#ovs_bridge = br-int
7688
7689#
7690# Default name for the floating IP pool.
7691#
7692# Specifies the name of floating IP pool used for allocating floating
7693# IPs. This
7694# option is only used if Neutron does not specify the floating IP pool
7695# name in
7696# port binding reponses.
7697# (string value)
7698#default_floating_pool = nova
7699
7700#
7701# Integer value representing the number of seconds to wait before
7702# querying
7703# Neutron for extensions. After this number of seconds the next time
7704# Nova
7705# needs to create a resource in Neutron it will requery Neutron for
7706# the
7707# extensions that it has loaded. Setting value to 0 will refresh the
7708# extensions with no wait.
7709# (integer value)
7710# Minimum value: 0
7711#extension_sync_interval = 600
7712
7713#
7714# When set to True, this option indicates that Neutron will be used to
7715# proxy
7716# metadata requests and resolve instance ids. Otherwise, the instance
7717# ID must be
7718# passed to the metadata request in the 'X-Instance-ID' header.
7719#
7720# Related options:
7721#
7722# * metadata_proxy_shared_secret
7723# (boolean value)
7724#service_metadata_proxy = false
service_metadata_proxy=True
7727#
7728# This option holds the shared secret string used to validate proxy
7729# requests to
7730# Neutron metadata requests. In order to be used, the
7731# 'X-Metadata-Provider-Signature' header must be supplied in the
7732# request.
7733#
7734# Related options:
7735#
7736# * service_metadata_proxy
7737# (string value)
7738#metadata_proxy_shared_secret =
metadata_proxy_shared_secret = {{ controller.metadata.password }}
7741# PEM encoded Certificate Authority to use when verifying HTTPs
7742# connections. (string value)
7743#cafile = <None>
7744{%- if controller.network.get('protocol', 'http') == 'https' %}
7745cafile={{ controller.network.get('cacert_file', controller.cacert_file) }}
7746{%- endif %}
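# Illustrative example (hypothetical path): when
# nova:controller:network:protocol is set to 'https', the block above renders
# the CA bundle taken from network.cacert_file (or the controller-wide
# cacert_file), e.g.
#
#   cafile=/etc/ssl/certs/ca-certificates.crt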
7747
7748# PEM encoded client certificate cert file (string value)
7749#certfile = <None>
7750
7751# PEM encoded client certificate key file (string value)
7752#keyfile = <None>
7753
7754# Verify HTTPS connections. (boolean value)
7755#insecure = false
7756
7757# Timeout value for http requests (integer value)
7758#timeout = <None>
7759timeout=300
7760
7761# Authentication type to load (string value)
7762# Deprecated group/name - [neutron]/auth_plugin
7763#auth_type = <None>
7764auth_type = v3password
7765
7766# Config Section from which to load plugin specific options (string
7767# value)
7768#auth_section = <None>
7769
7770# Authentication URL (string value)
7771#auth_url = <None>
7772auth_url = {{ controller.identity.get('protocol', 'http') }}://{{ controller.identity.host }}:{{ controller.identity.port }}/v3
7773
7774# Scope for system operations (string value)
7775#system_scope = <None>
7776
7777# Domain ID to scope to (string value)
7778#domain_id = <None>
7779
7780# Domain name to scope to (string value)
7781#domain_name = <None>
7782
7783# Project ID to scope to (string value)
7784#project_id = <None>
7785
7786# Project name to scope to (string value)
7787#project_name = <None>
7788
7789# Domain ID containing project (string value)
7790#project_domain_id = <None>
7791
7792# Domain name containing project (string value)
7793#project_domain_name = <None>
7794project_domain_name = {{ controller.get('project_domain_name', 'Default') }}
7795
7796# Trust ID (string value)
7797#trust_id = <None>
7798
7799# Optional domain ID to use with v3 and v2 parameters. It will be used
7800# for both the user and project domain in v3 and ignored in v2
7801# authentication. (string value)
7802#default_domain_id = <None>
7803
7804# Optional domain name to use with v3 API and v2 parameters. It will
7805# be used for both the user and project domain in v3 and ignored in v2
7806# authentication. (string value)
7807#default_domain_name = <None>
7808
7809# User ID (string value)
7810#user_id = <None>
7811
7812# Username (string value)
7813# Deprecated group/name - [neutron]/user_name
7814#username = <None>
7815
7816# User's domain id (string value)
7817#user_domain_id = <None>
7818
7819# User's domain name (string value)
7820#user_domain_name = <None>
7821user_domain_name = {{ controller.get('user_domain_name', 'Default') }}
7822
7823# User's password (string value)
7824#password = <None>
7825
7826# Tenant ID (string value)
7827#tenant_id = <None>
7828
7829# Tenant Name (string value)
7830#tenant_name = <None>
7831
7832# The default service_type for endpoint URL discovery. (string value)
7833#service_type = network
7834
7835# The default service_name for endpoint URL discovery. (string value)
7836#service_name = <None>
7837
7838# List of interfaces, in order of preference, for endpoint URL. (list
7839# value)
7840#valid_interfaces = internal,public
7841
7842# The default region_name for endpoint URL discovery. (string value)
7843#region_name = <None>
7844
7845# Always use this endpoint URL for requests for this client. NOTE: The
7846# unversioned endpoint should be specified here; to request a
7847# particular API version, use the `version`, `min-version`, and/or
7848# `max-version` options. (string value)
7849#endpoint_override = <None>
7850{% if pillar.neutron is defined and pillar.neutron.server is defined %}
7851password = {{ pillar.neutron.server.identity.password }}
7852project_name = {{ pillar.neutron.server.identity.tenant }}
7853username = {{ pillar.neutron.server.identity.user }}
7854region_name = {{ pillar.neutron.server.identity.region }}
7855{%- else %}
7856password = {{ controller.network.password }}
7857project_name = {{ controller.network.tenant }}
7858username = {{ controller.network.user }}
7859region_name = {{ controller.network.region }}
7860{%- endif %}
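# Illustrative rendered result (hypothetical values): on a node that also
# carries the neutron server pillar, the block above might render e.g.
#
#   password = <neutron service password>
#   project_name = service
#   username = neutron
#   region_name = RegionOne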
7861
7862
7863[notifications]
7864#
7865# Most of the actions in Nova which manipulate the system state
7866# generate
7867# notifications which are posted to the messaging component (e.g.
7868# RabbitMQ) and
7869# can be consumed by any service outside the OpenStack. More technical
7870# details
7871# at
7872# https://docs.openstack.org/nova/latest/reference/notifications.html
7873
7874#
7875# From nova.conf
7876#
7877
7878#
7879# If set, send compute.instance.update notifications on
7880# instance state changes.
7881#
7882# Please refer to
7883# https://docs.openstack.org/nova/latest/reference/notifications.html
7884# for
7885# additional information on notifications.
7886#
7887# Possible values:
7888#
7889# * None - no notifications
7890# * "vm_state" - notifications are sent with VM state transition
7891# information in
7892# the ``old_state`` and ``state`` fields. The ``old_task_state`` and
7893# ``new_task_state`` fields will be set to the current task_state of
7894# the
7895# instance.
7896# * "vm_and_task_state" - notifications are sent with VM and task
7897# state
7898# transition information.
7899# (string value)
7900# Possible values:
7901# <None> - <No description provided>
7902# vm_state - <No description provided>
7903# vm_and_task_state - <No description provided>
7904#notify_on_state_change = <None>
7905{%- if controller.get('notification', {}).notify_on is defined %}
7906{%- for key, value in controller.notification.notify_on.iteritems() %}
7907notify_on_{{ key }} = {{ value }}
7908{%- endfor %}
7909{%- endif %}
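# Illustrative example (hypothetical pillar, assuming the usual
# nova:controller mapping): a pillar such as
#
#   nova:
#     controller:
#       notification:
#         notify_on:
#           state_change: vm_and_task_state
#
# would render the loop above as:
#
#   notify_on_state_change = vm_and_task_state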
7910
7911#
7912# If enabled, send api.fault notifications on caught exceptions in the
7913# API service.
7914# (boolean value)
7915# Deprecated group/name - [DEFAULT]/notify_api_faults
7916#notify_on_api_faults=false
7917notify_on_api_faults=false
7918
7919# Default notification level for outgoing notifications. (string
7920# value)
7921# Possible values:
7922# DEBUG - <No description provided>
7923# INFO - <No description provided>
7924# WARN - <No description provided>
7925# ERROR - <No description provided>
7926# CRITICAL - <No description provided>
7927# Deprecated group/name - [DEFAULT]/default_notification_level
7928#default_level = INFO
7929
7930# DEPRECATED:
7931# Default publisher_id for outgoing notifications. If you consider
7932# routing
7933# notifications using different publisher, change this value
7934# accordingly.
7935#
7936# Possible values:
7937#
7938# * Defaults to the current hostname of this host, but it can be any
7939# valid
7940# oslo.messaging publisher_id
7941#
7942# Related options:
7943#
7944# * host - Hostname, FQDN or IP address of this host.
7945# (string value)
7946# This option is deprecated for removal since 17.0.0.
7947# Its value may be silently ignored in the future.
7948# Reason:
7949# This option is only used when ``monkey_patch=True`` and
7950# ``monkey_patch_modules`` is configured to specify the legacy
7951# notify_decorator.
7952# Since the monkey_patch and monkey_patch_modules options are
7953# deprecated, this
7954# option is also deprecated.
7955#default_publisher_id = $host
7956
7957#
7958# Specifies which notification format shall be used by nova.
7959#
7960# The default value is fine for most deployments and rarely needs to
7961# be changed.
7962# This value can be set to 'versioned' once the infrastructure moves
7963# closer to
7964# consuming the newer format of notifications. After this occurs, this
7965# option
7966# will be removed.
7967#
7968# Note that notifications can be completely disabled by setting
7969# ``driver=noop``
7970# in the ``[oslo_messaging_notifications]`` group.
7971#
7972# Possible values:
7973# * unversioned: Only the legacy unversioned notifications are
7974# emitted.
7975# * versioned: Only the new versioned notifications are emitted.
7976# * both: Both the legacy unversioned and the new versioned
7977# notifications are
7978# emitted. (Default)
7979#
7980# The list of versioned notifications is visible in
7981# https://docs.openstack.org/nova/latest/reference/notifications.html
7982# (string value)
7983# Possible values:
7984# unversioned - <No description provided>
7985# versioned - <No description provided>
7986# both - <No description provided>
7987#notification_format = both
7988
7989#
7990# Specifies the topics for the versioned notifications issued by nova.
7991#
7992# The default value is fine for most deployments and rarely needs to
7993# be changed.
7994# However, if you have a third-party service that consumes versioned
7995# notifications, it might be worth getting a topic for that service.
7996# Nova will send a message containing a versioned notification payload
7997# to each
7998# topic queue in this list.
7999#
8000# The list of versioned notifications is visible in
8001# https://docs.openstack.org/nova/latest/reference/notifications.html
8002# (list value)
8003#versioned_notifications_topics = versioned_notifications
8004
8005#
8006# If enabled, include block device information in the versioned
8007# notification
8008# payload. Sending block device information is disabled by default as
8009# providing
8010# that information can incur some overhead on the system since the
8011# information
8012# may need to be loaded from the database.
8013# (boolean value)
8014#bdms_in_notifications = false
8015
8016
8017[osapi_v21]
8018
8019#
8020# From nova.conf
8021#
8022
8023# DEPRECATED:
8024# This option is a string representing a regular expression (regex)
8025# that matches
8026# the project_id as contained in URLs. If not set, it will match
8027# normal UUIDs
8028# created by keystone.
8029#
8030# Possible values:
8031#
8032# * A string representing any legal regular expression
8033# (string value)
8034# This option is deprecated for removal since 13.0.0.
8035# Its value may be silently ignored in the future.
8036# Reason:
8037# Recent versions of nova constrain project IDs to hexadecimal
8038# characters and
8039# dashes. If your installation uses IDs outside of this range, you
8040# should use
8041# this option to provide your own regex and give you time to migrate
8042# offending
8043# projects to valid IDs before the next release.
8044#project_id_regex = <None>
8045
8046
8047[pci]
8048
8049#
8050# From nova.conf
8051#
8052
8053#
8054# An alias for a PCI passthrough device requirement.
8055#
8056# This allows users to specify the alias in the extra specs for a
8057# flavor, without
8058# needing to repeat all the PCI property requirements.
8059#
8060# Possible Values:
8061#
8062# * A list of JSON values which describe the aliases. For example::
8063#
8064# alias = {
8065# "name": "QuickAssist",
8066# "product_id": "0443",
8067# "vendor_id": "8086",
8068# "device_type": "type-PCI",
8069# "numa_policy": "required"
8070# }
8071#
8072# This defines an alias for the Intel QuickAssist card. (multi
8073# valued). Valid
8074# key values are :
8075#
8076# ``name``
8077# Name of the PCI alias.
8078#
8079# ``product_id``
8080# Product ID of the device in hexadecimal.
8081#
8082# ``vendor_id``
8083# Vendor ID of the device in hexadecimal.
8084#
8085# ``device_type``
8086# Type of PCI device. Valid values are: ``type-PCI``, ``type-PF``
8087# and
8088# ``type-VF``.
8089#
8090# ``numa_policy``
8091# Required NUMA affinity of device. Valid values are: ``legacy``,
8092# ``preferred`` and ``required``.
8093# (multi valued)
8094# Deprecated group/name - [DEFAULT]/pci_alias
8095#alias =
{%- if controller.get('pci', {}).get('alias', false) %}
8097 {%- for alias_name, alias in controller.pci.alias.iteritems() %}
8098alias = {{ alias | json }}
8099 {%- endfor %}
8100{%- endif %}
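# Illustrative example (hypothetical pillar, values taken from the alias
# description above): with
#
#   nova:
#     controller:
#       pci:
#         alias:
#           qat:
#             name: QuickAssist
#             product_id: "0443"
#             vendor_id: "8086"
#             device_type: "type-PCI"
#
# the loop above would emit a JSON alias line similar to:
#
#   alias = {"name": "QuickAssist", "product_id": "0443", "vendor_id": "8086", "device_type": "type-PCI"}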
8102#
8103# White list of PCI devices available to VMs.
8104#
8105# Possible values:
8106#
8107# * A JSON dictionary which describe a whitelisted PCI device. It
8108# should take
8109# the following format:
8110#
8111# ["vendor_id": "<id>",] ["product_id": "<id>",]
8112# ["address": "[[[[<domain>]:]<bus>]:][<slot>][.[<function>]]" |
8113# "devname": "<name>",]
8114# {"<tag>": "<tag_value>",}
8115#
8116# Where '[' indicates zero or one occurrences, '{' indicates zero or
8117# multiple
8118# occurrences, and '|' mutually exclusive options. Note that any
8119# missing
8120# fields are automatically wildcarded.
8121#
8122# Valid key values are :
8123#
8124# * "vendor_id": Vendor ID of the device in hexadecimal.
8125# * "product_id": Product ID of the device in hexadecimal.
8126# * "address": PCI address of the device.
8127# * "devname": Device name of the device (for e.g. interface name).
8128# Not all
8129# PCI devices have a name.
8130# * "<tag>": Additional <tag> and <tag_value> used for matching PCI
8131# devices.
8132# Supported <tag>: "physical_network".
8133#
8134# The address key supports traditional glob style and regular
8135# expression
8136# syntax. Valid examples are:
8137#
8138# passthrough_whitelist = {"devname":"eth0",
8139# "physical_network":"physnet"}
8140# passthrough_whitelist = {"address":"*:0a:00.*"}
8141# passthrough_whitelist = {"address":":0a:00.",
8142# "physical_network":"physnet1"}
8143# passthrough_whitelist = {"vendor_id":"1137",
8144# "product_id":"0071"}
8145# passthrough_whitelist = {"vendor_id":"1137",
8146# "product_id":"0071",
8147# "address": "0000:0a:00.1",
8148# "physical_network":"physnet1"}
8149# passthrough_whitelist = {"address":{"domain": ".*",
8150# "bus": "02", "slot": "01",
8151# "function": "[2-7]"},
8152# "physical_network":"physnet1"}
8153# passthrough_whitelist = {"address":{"domain": ".*",
8154# "bus": "02", "slot":
8155# "0[1-2]",
8156# "function": ".*"},
8157# "physical_network":"physnet1"}
8158#
8159# The following are invalid, as they specify mutually exclusive
8160# options:
8161#
8162# passthrough_whitelist = {"devname":"eth0",
8163# "physical_network":"physnet",
8164# "address":"*:0a:00.*"}
8165#
8166# * A JSON list of JSON dictionaries corresponding to the above
8167# format. For
8168# example:
8169#
8170# passthrough_whitelist = [{"product_id":"0001",
8171# "vendor_id":"8086"},
8172# {"product_id":"0002",
8173# "vendor_id":"8086"}]
8174# (multi valued)
8175# Deprecated group/name - [DEFAULT]/pci_passthrough_whitelist
8176#passthrough_whitelist =
8177{%- if controller.get('sriov', false) %}
8178{%- for nic_name, sriov in controller.sriov.iteritems() %}
8179passthrough_whitelist = {"devname":"{{ sriov.devname }}","physical_network":"{{ sriov.physical_network }}"}
8180{%- endfor %}
8181{%- endif %}
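# Illustrative example (hypothetical pillar, assuming the usual
# nova:controller mapping): with
#
#   nova:
#     controller:
#       sriov:
#         sriov_nic_one:
#           devname: eth3
#           physical_network: physnet2
#
# the loop above would render:
#
#   passthrough_whitelist = {"devname":"eth3","physical_network":"physnet2"}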
8182
8183[placement]
8184
{%- set _data = controller.identity %}
8186{%- set auth_type = _data.get('auth_type', 'password') %}
8187{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': controller.cacert_file}) %}{% endif %}
8188{%- include "oslo_templates/files/queens/keystoneauth/_type_" + auth_type + ".conf" %}
8190# List of interfaces, in order of preference, for endpoint URL. (list
8191# value)
8192# Deprecated group/name - [placement]/os_interface
valid_interfaces = {{ controller.identity.get('valid_interfaces', 'internal') }}
8195# Always use this endpoint URL for requests for this client. NOTE: The
8196# unversioned endpoint should be specified here; to request a
8197# particular API version, use the `version`, `min-version`, and/or
8198# `max-version` options. (string value)
8199#endpoint_override = <None>
8200
[quota]
8202#
8203# Quota options allow managing quotas in an OpenStack deployment.
8204
8205#
8206# From nova.conf
8207#
8208
8209#
8210# The number of instances allowed per project.
8211#
8212# Possible Values
8213#
8214# * A positive integer or 0.
8215# * -1 to disable the quota.
8216# (integer value)
8217# Minimum value: -1
8218# Deprecated group/name - [DEFAULT]/quota_instances
8219#instances = 10
8220
8221#
8222# The number of instance cores or vCPUs allowed per project.
8223#
8224# Possible values:
8225#
8226# * A positive integer or 0.
8227# * -1 to disable the quota.
8228# (integer value)
8229# Minimum value: -1
8230# Deprecated group/name - [DEFAULT]/quota_cores
8231#cores = 20
8232
8233#
8234# The number of megabytes of instance RAM allowed per project.
8235#
8236# Possible values:
8237#
8238# * A positive integer or 0.
8239# * -1 to disable the quota.
8240# (integer value)
8241# Minimum value: -1
8242# Deprecated group/name - [DEFAULT]/quota_ram
8243#ram = 51200
8244
8245# DEPRECATED:
8246# The number of floating IPs allowed per project.
8247#
8248# Floating IPs are not allocated to instances by default. Users need
8249# to select
8250# them from the pool configured by the OpenStack administrator to
8251# attach to their
8252# instances.
8253#
8254# Possible values:
8255#
8256# * A positive integer or 0.
8257# * -1 to disable the quota.
8258# (integer value)
8259# Minimum value: -1
8260# Deprecated group/name - [DEFAULT]/quota_floating_ips
8261# This option is deprecated for removal since 15.0.0.
8262# Its value may be silently ignored in the future.
8263# Reason:
8264# nova-network is deprecated, as are any related configuration
8265# options.
8266#floating_ips = 10
8267
8268# DEPRECATED:
8269# The number of fixed IPs allowed per project.
8270#
8271# Unlike floating IPs, fixed IPs are allocated dynamically by the
8272# network
8273# component when instances boot up. This quota value should be at
8274# least the
8275# number of instances allowed
8276#
8277# Possible values:
8278#
8279# * A positive integer or 0.
8280# * -1 to disable the quota.
8281# (integer value)
8282# Minimum value: -1
8283# Deprecated group/name - [DEFAULT]/quota_fixed_ips
8284# This option is deprecated for removal since 15.0.0.
8285# Its value may be silently ignored in the future.
8286# Reason:
8287# nova-network is deprecated, as are any related configuration
8288# options.
8289#fixed_ips = -1
8290
8291#
8292# The number of metadata items allowed per instance.
8293#
8294# Users can associate metadata with an instance during instance
8295# creation. This
8296# metadata takes the form of key-value pairs.
8297#
8298# Possible values:
8299#
8300# * A positive integer or 0.
8301# * -1 to disable the quota.
8302# (integer value)
8303# Minimum value: -1
8304# Deprecated group/name - [DEFAULT]/quota_metadata_items
8305#metadata_items = 128
8306
8307#
8308# The number of injected files allowed.
8309#
8310# File injection allows users to customize the personality of an
8311# instance by
8312# injecting data into it upon boot. Only text file injection is
8313# permitted: binary
8314# or ZIP files are not accepted. During file injection, any existing
8315# files that
8316# match specified files are renamed to include ``.bak`` extension
8317# appended with a
8318# timestamp.
8319#
8320# Possible values:
8321#
8322# * A positive integer or 0.
8323# * -1 to disable the quota.
8324# (integer value)
8325# Minimum value: -1
8326# Deprecated group/name - [DEFAULT]/quota_injected_files
8327#injected_files = 5
8328
8329#
8330# The number of bytes allowed per injected file.
8331#
8332# Possible values:
8333#
8334# * A positive integer or 0.
8335# * -1 to disable the quota.
8336# (integer value)
8337# Minimum value: -1
8338# Deprecated group/name - [DEFAULT]/quota_injected_file_content_bytes
8339#injected_file_content_bytes = 10240
8340
8341#
8342# The maximum allowed injected file path length.
8343#
8344# Possible values:
8345#
8346# * A positive integer or 0.
8347# * -1 to disable the quota.
8348# (integer value)
8349# Minimum value: -1
8350# Deprecated group/name - [DEFAULT]/quota_injected_file_path_length
8351#injected_file_path_length = 255
8352
8353# DEPRECATED:
8354# The number of security groups per project.
8355#
8356# Possible values:
8357#
8358# * A positive integer or 0.
8359# * -1 to disable the quota.
8360# (integer value)
8361# Minimum value: -1
8362# Deprecated group/name - [DEFAULT]/quota_security_groups
8363# This option is deprecated for removal since 15.0.0.
8364# Its value may be silently ignored in the future.
8365# Reason:
8366# nova-network is deprecated, as are any related configuration
8367# options.
8368#security_groups = 10
8369
8370# DEPRECATED:
8371# The number of security rules per security group.
8372#
8373# The associated rules in each security group control the traffic to
8374# instances in
8375# the group.
8376#
8377# Possible values:
8378#
8379# * A positive integer or 0.
8380# * -1 to disable the quota.
8381# (integer value)
8382# Minimum value: -1
8383# Deprecated group/name - [DEFAULT]/quota_security_group_rules
8384# This option is deprecated for removal since 15.0.0.
8385# Its value may be silently ignored in the future.
8386# Reason:
8387# nova-network is deprecated, as are any related configuration
8388# options.
8389#security_group_rules = 20
8390
8391#
8392# The maximum number of key pairs allowed per user.
8393#
8394# Users can create at least one key pair for each project and use the
8395# key pair
8396# for multiple instances that belong to that project.
8397#
8398# Possible values:
8399#
8400# * A positive integer or 0.
8401# * -1 to disable the quota.
8402# (integer value)
8403# Minimum value: -1
8404# Deprecated group/name - [DEFAULT]/quota_key_pairs
8405#key_pairs = 100
8406
8407#
8408# The maximum number of server groups per project.
8409#
8410# Server groups are used to control the affinity and anti-affinity
8411# scheduling
8412# policy for a group of servers or instances. Reducing the quota will
8413# not affect
8414# any existing group, but new servers will not be allowed into groups
8415# that have
8416# become over quota.
8417#
8418# Possible values:
8419#
8420# * A positive integer or 0.
8421# * -1 to disable the quota.
8422# (integer value)
8423# Minimum value: -1
8424# Deprecated group/name - [DEFAULT]/quota_server_groups
8425#server_groups = 10
8426
8427#
8428# The maximum number of servers per server group.
8429#
8430# Possible values:
8431#
8432# * A positive integer or 0.
8433# * -1 to disable the quota.
8434# (integer value)
8435# Minimum value: -1
8436# Deprecated group/name - [DEFAULT]/quota_server_group_members
8437#server_group_members = 10
8438
8439#
8440# The number of seconds until a reservation expires.
8441#
8442# This quota represents the time period for invalidating quota
8443# reservations.
8444# (integer value)
8445#reservation_expire = 86400
8446
8447#
8448# The count of reservations until usage is refreshed.
8449#
8450# This defaults to 0 (off) to avoid additional load but it is useful
8451# to turn on
8452# to help keep quota usage up-to-date and reduce the impact of out of
8453# sync usage
8454# issues.
8455# (integer value)
8456# Minimum value: 0
8457#until_refresh = 0
8458
8459#
8460# The number of seconds between subsequent usage refreshes.
8461#
8462# This defaults to 0 (off) to avoid additional load but it is useful
8463# to turn on
8464# to help keep quota usage up-to-date and reduce the impact of out of
8465# sync usage
8466# issues. Note that quotas are not updated on a periodic task, they
8467# will update
8468# on a new reservation if max_age has passed since the last
8469# reservation.
8470# (integer value)
8471# Minimum value: 0
8472#max_age = 0
8473
8474# DEPRECATED:
8475# The quota enforcer driver.
8476#
8477# Provides abstraction for quota checks. Users can configure a
8478# specific
8479# driver to use for quota checks.
8480#
8481# Possible values:
8482#
8483# * nova.quota.DbQuotaDriver (default) or any string representing
8484# fully
8485# qualified class name.
8486# (string value)
8487# Deprecated group/name - [DEFAULT]/quota_driver
8488# This option is deprecated for removal since 14.0.0.
8489# Its value may be silently ignored in the future.
8490#driver = nova.quota.DbQuotaDriver
8491
8492#
8493# Recheck quota after resource creation to prevent allowing quota to
8494# be exceeded.
8495#
8496# This defaults to True (recheck quota after resource creation) but
8497# can be set to
8498# False to avoid additional load if allowing quota to be exceeded
8499# because of
8500# racing requests is considered acceptable. For example, when set to
8501# False, if a
8502# user makes highly parallel REST API requests to create servers, it
8503# will be
8504# possible for them to create more servers than their allowed quota
8505# during the
8506# race. If their quota is 10 servers, they might be able to create 50
8507# during the
8508# burst. After the burst, they will not be able to create any more
8509# servers but
8510# they will be able to keep their 50 servers until they delete them.
8511#
8512# The initial quota check is done before resources are created, so if
8513# multiple
8514# parallel requests arrive at the same time, all could pass the quota
8515# check and
8516# create resources, potentially exceeding quota. When recheck_quota is
8517# True,
8518# quota will be checked a second time after resources have been
8519# created and if
8520# the resource is over quota, it will be deleted and OverQuota will be
8521# raised,
8522# usually resulting in a 403 response to the REST API user. This makes
8523# it
8524# impossible for a user to exceed their quota with the caveat that it
8525# will,
8526# however, be possible for a REST API user to be rejected with a 403
8527# response in
8528# the event of a collision close to reaching their quota limit, even
8529# if the user
8530# has enough quota available when they made the request.
8531# (boolean value)
8532#recheck_quota = true
8533
8534
8535[rdp]
8536#
8537# Options under this group enable and configure Remote Desktop
8538# Protocol (RDP) related features.
8540#
8541# This group is only relevant to Hyper-V users.
8542
8543#
8544# From nova.conf
8545#
8546
8547#
8548# Enable Remote Desktop Protocol (RDP) related features.
8549#
8550# Hyper-V, unlike the majority of the hypervisors employed on Nova
8551# compute
8552# nodes, uses RDP instead of VNC and SPICE as a desktop sharing
8553# protocol to
8554# provide instance console access. This option enables RDP for
8555# graphical
8556# console access for virtual machines created by Hyper-V.
8557#
8558# **Note:** RDP should only be enabled on compute nodes that support
8559# the Hyper-V
8560# virtualization platform.
8561#
8562# Related options:
8563#
8564# * ``compute_driver``: Must be hyperv.
8565#
8566# (boolean value)
8567#enabled = false
8568
8569#
8570# The URL an end user would use to connect to the RDP HTML5 console
8571# proxy.
8572# The console proxy service is called with this token-embedded URL and
8573# establishes the connection to the proper instance.
8574#
8575# An RDP HTML5 console proxy service will need to be configured to
8576# listen on the
8577# address configured here. Typically the console proxy service would
8578# be run on a
8579# controller node. The localhost address used as default would only
8580# work in a
8581# single-node environment, i.e. devstack.
8582#
8583# An RDP HTML5 proxy allows a user to access via the web the text or
8584# graphical
8585# console of any Windows server or workstation using RDP. RDP HTML5
8586# console
8587# proxy services include FreeRDP, wsgate.
8588# See https://github.com/FreeRDP/FreeRDP-WebConnect
8589#
8590# Possible values:
8591#
8592# * <scheme>://<ip-address>:<port-number>/
8593#
8594# The scheme must be identical to the scheme configured for the RDP
8595# HTML5
8596# console proxy service. It is ``http`` or ``https``.
8597#
8598# The IP address must be identical to the address on which the RDP
8599# HTML5
8600# console proxy service is listening.
8601#
8602# The port must be identical to the port on which the RDP HTML5
8603# console proxy
8604# service is listening.
8605#
8606# Related options:
8607#
8608# * ``rdp.enabled``: Must be set to ``True`` for
8609# ``html5_proxy_base_url`` to be
8610# effective.
8611# (uri value)
8612#html5_proxy_base_url = http://127.0.0.1:6083/
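#
# Illustrative example only (scheme and hostname are hypothetical); the value
# must match where your RDP HTML5 console proxy service actually listens:
#
# html5_proxy_base_url = https://rdp-proxy.example.com:6083/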
8613
8614
8615[remote_debug]
8616
8617#
8618# From nova.conf
8619#
8620
8621#
8622# Debug host (IP or name) to connect to. This command line parameter
8623# is used when
8624# you want to connect to a nova service via a debugger running on a
8625# different
8626# host.
8627#
8628# Note that using the remote debug option changes how Nova uses the
8629# eventlet
8630# library to support async IO. This could result in failures that do
8631# not occur
8632# under normal operation. Use at your own risk.
8633#
8634# Possible Values:
8635#
8636# * IP address of a remote host as a command line parameter
8637# to a nova service. For Example:
8638#
8639# /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf
8640# --remote_debug-host <IP address where the debugger is running>
8641# (unknown value)
8642#host = <None>
8643
8644#
8645# Debug port to connect to. This command line parameter allows you to
8646# specify
8647# the port you want to use to connect to a nova service via a debugger
8648# running
8649# on different host.
8650#
8651# Note that using the remote debug option changes how Nova uses the
8652# eventlet
8653# library to support async IO. This could result in failures that do
8654# not occur
8655# under normal operation. Use at your own risk.
8656#
8657# Possible Values:
8658#
8659# * Port number you want to use as a command line parameter
8660# to a nova service. For Example:
8661#
8662# /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf
8663# --remote_debug-host <IP address where the debugger is running>
8664# --remote_debug-port <port the debugger is listening on>.
8665# (port value)
8666# Minimum value: 0
8667# Maximum value: 65535
8668#port = <None>
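#
# Illustrative example (host and port are hypothetical placeholders); both
# options point at the machine where your debugger is listening:
#
# host = 192.168.0.10
# port = 5678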
8669
8670
8671[scheduler]
8672
8673#
8674# From nova.conf
8675#
8676
8677#
8678# The scheduler host manager to use.
8679#
8680# The host manager manages the in-memory picture of the hosts that the
8681# scheduler
8682# uses. The options values are chosen from the entry points under the
8683# namespace
8684# 'nova.scheduler.host_manager' in 'setup.cfg'.
8685#
8686# NOTE: The "ironic_host_manager" option is deprecated as of the
8687# 17.0.0 Queens
8688# release.
8689# (string value)
8690# Possible values:
8691# host_manager - <No description provided>
8692# ironic_host_manager - <No description provided>
8693# Deprecated group/name - [DEFAULT]/scheduler_host_manager
8694#host_manager = host_manager
8695
8696#
8697# The class of the driver used by the scheduler. This should be chosen
8698# from one
8699# of the entrypoints under the namespace 'nova.scheduler.driver' of
8700# file
8701# 'setup.cfg'. If nothing is specified in this option, the
8702# 'filter_scheduler' is
8703# used.
8704#
8705# Other options are:
8706#
8707# * 'caching_scheduler' which aggressively caches the system state for
8708# better
8709# individual scheduler performance at the risk of more retries when
8710# running
8711# multiple schedulers. [DEPRECATED]
8712# * 'chance_scheduler' which simply picks a host at random.
8713# [DEPRECATED]
8714# * 'fake_scheduler' which is used for testing.
8715#
8716# Possible values:
8717#
8718# * Any of the drivers included in Nova:
8719# ** filter_scheduler
8720# ** caching_scheduler
8721# ** chance_scheduler
8722# ** fake_scheduler
8723# * You may also set this to the entry point name of a custom
8724# scheduler driver,
8725# but you will be responsible for creating and maintaining it in
8726# your setup.cfg
8727# file.
8728# (string value)
8729# Deprecated group/name - [DEFAULT]/scheduler_driver
8730#driver = filter_scheduler
8731
8732#
8733# Periodic task interval.
8734#
8735# This value controls how often (in seconds) to run periodic tasks in
8736# the
8737# scheduler. The specific tasks that are run for each period are
8738# determined by
8739# the particular scheduler being used.
8740#
8741# If this is larger than the nova-service 'service_down_time' setting,
8742# Nova may
8743# report the scheduler service as down. This is because the scheduler
8744# driver is
8745# responsible for sending a heartbeat and it will only do that as
8746# often as this
8747# option allows. As each scheduler can work a little differently than
8748# the others,
8749# be sure to test this with your selected scheduler.
8750#
8751# Possible values:
8752#
8753# * An integer, where the integer corresponds to periodic task
8754# interval in
8755# seconds. 0 uses the default interval (60 seconds). A negative
8756# value disables
8757# periodic tasks.
8758#
8759# Related options:
8760#
8761# * ``nova-service service_down_time``
8762# (integer value)
8763# Deprecated group/name - [DEFAULT]/scheduler_driver_task_period
8764#periodic_task_interval = 60
8765
8766#
8767# This is the maximum number of attempts that will be made for a given
8768# instance
8769# build/move operation. It limits the number of alternate hosts
8770# returned by the
8771# scheduler. When that list of hosts is exhausted, a
8772# MaxRetriesExceeded
8773# exception is raised and the instance is set to an error state.
8774#
8775# Possible values:
8776#
8777# * A positive integer, where the integer corresponds to the max
8778# number of
8779# attempts that can be made when building or moving an instance.
8780# (integer value)
8781# Minimum value: 1
8782# Deprecated group/name - [DEFAULT]/scheduler_max_attempts
8783#max_attempts = 3
8784
8785#
8786# Periodic task interval.
8787#
8788# This value controls how often (in seconds) the scheduler should
8789# attempt
8790# to discover new hosts that have been added to cells. If negative
8791# (the
8792# default), no automatic discovery will occur.
8793#
8794# Deployments where compute nodes come and go frequently may want this
8795# enabled, where others may prefer to manually discover hosts when one
8796# is added to avoid any overhead from constantly checking. If enabled,
8797# every time this runs, we will select any unmapped hosts out of each
8798# cell database on every run.
8799# (integer value)
8800# Minimum value: -1
8801#discover_hosts_in_cells_interval = -1
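#
# Illustrative example only (interval chosen arbitrarily); this would make the
# scheduler look for unmapped compute hosts every five minutes:
#
# discover_hosts_in_cells_interval = 300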
8802
8803#
8804# This setting determines the maximum limit on results received from
8805# the
8806# placement service during a scheduling operation. It effectively
8807# limits
8808# the number of hosts that may be considered for scheduling requests
8809# that
8810# match a large number of candidates.
8811#
8812# A value of 1 (the minimum) will effectively defer scheduling to the
8813# placement
8814# service strictly on "will it fit" grounds. A higher value will put
8815# an upper
8816# cap on the number of results the scheduler will consider during the
8817# filtering
8818# and weighing process. Large deployments may need to set this lower
8819# than the
8820# total number of hosts available to limit memory consumption, network
8821# traffic,
8822# etc. of the scheduler.
8823#
8824# This option is only used by the FilterScheduler; if you use a
8825# different
8826# scheduler, this option has no effect.
8827# (integer value)
8828# Minimum value: 1
8829#max_placement_results = 1000
8830
8831
8832[serial_console]
8833#
8834# The serial console feature allows you to connect to a guest in case
8835# a
8836# graphical console like VNC, RDP or SPICE is not available. This is
8837# only
8838# currently supported for the libvirt, Ironic and hyper-v drivers.
8839
8840#
8841# From nova.conf
8842#
8843
8844#
8845# Enable the serial console feature.
8846#
8847# In order to use this feature, the service ``nova-serialproxy`` needs
8848# to run.
8849# This service is typically executed on the controller node.
8850# (boolean value)
8851#enabled = false
8852
8853#
8854# A range of TCP ports a guest can use for its backend.
8855#
8856# Each instance which gets created will use one port out of this
8857# range. If the
8858# range is not big enough to provide another port for a new instance,
8859# this
8860# instance won't get launched.
8861#
8862# Possible values:
8863#
8864# * Each string which passes the regex ``\d+:\d+``. For example
8865# ``10000:20000``.
8866# Be sure that the first port number is lower than the second port
8867# number
8868# and that both are in range from 0 to 65535.
8869# (string value)
8870#port_range = 10000:20000
8871
8872#
8873# The URL an end user would use to connect to the ``nova-serialproxy``
8874# service.
8875#
8876# The ``nova-serialproxy`` service is called with this token enriched
8877# URL
8878# and establishes the connection to the proper instance.
8879#
8880# Related options:
8881#
8882# * The IP address must be identical to the address to which the
8883# ``nova-serialproxy`` service is listening (see option
8884# ``serialproxy_host``
8885# in this section).
8886# * The port must be the same as in the option ``serialproxy_port`` of
8887# this
8888# section.
8889# * If you choose to use a secured websocket connection, then start
8890# this option
8891# with ``wss://`` instead of the unsecured ``ws://``. The options
8892# ``cert``
8893# and ``key`` in the ``[DEFAULT]`` section have to be set for that.
8894# (uri value)
8895#base_url = ws://127.0.0.1:6083/
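#
# Illustrative example of a secured websocket endpoint (hostname is
# hypothetical); as noted above, ``cert`` and ``key`` in ``[DEFAULT]`` must
# also be set when using ``wss://``:
#
# base_url = wss://controller.example.com:6083/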
8896
8897#
8898# The IP address to which proxy clients (like ``nova-serialproxy``)
8899# should
8900# connect to get the serial console of an instance.
8901#
8902# This is typically the IP address of the host of a ``nova-compute``
8903# service.
8904# (string value)
8905#proxyclient_address = 127.0.0.1
8906
8907#
8908# The IP address which is used by the ``nova-serialproxy`` service to
8909# listen
8910# for incoming requests.
8911#
8912# The ``nova-serialproxy`` service listens on this IP address for
8913# incoming
8914# connection requests to instances which expose serial console.
8915#
8916# Related options:
8917#
8918# * Ensure that this is the same IP address which is defined in the
8919# option
8920# ``base_url`` of this section or use ``0.0.0.0`` to listen on all
8921# addresses.
8922# (string value)
8923#serialproxy_host = 0.0.0.0
8924
8925#
8926# The port number which is used by the ``nova-serialproxy`` service to
8927# listen
8928# for incoming requests.
8929#
8930# The ``nova-serialproxy`` service listens on this port number for
8931# incoming
8932# connection requests to instances which expose serial console.
8933#
8934# Related options:
8935#
8936# * Ensure that this is the same port number which is defined in the
8937# option
8938# ``base_url`` of this section.
8939# (port value)
8940# Minimum value: 0
8941# Maximum value: 65535
8942#serialproxy_port = 6083
8943
8944
8945[service_user]
8946#
8947# Configuration options for service to service authentication using a
8948# service
8949# token. These options allow sending a service token along with the
8950# user's token
8951# when contacting external REST APIs.
8952
8953#
8954# From nova.conf
8955#
8956
8957#
8958# When True, if sending a user token to a REST API, also send a
8959# service token.
8960#
8961# Nova often reuses the user token provided to the nova-api to talk to
8962# other REST
8963# APIs, such as Cinder, Glance and Neutron. It is possible that while
8964# the user
8965# token was valid when the request was made to Nova, the token may
8966# expire before
8967# it reaches the other service. To avoid any failures, and to make it
8968# clear it is
8969# Nova calling the service on the user's behalf, we include a service
8970# token along
8971# with the user token. Should the user's token have expired, a valid
8972# service
8973# token ensures the REST API request will still be accepted by the
8974# keystone
8975# middleware.
8976# (boolean value)
8977#send_service_user_token = false
8978
8979# PEM encoded Certificate Authority to use when verifying HTTPs
8980# connections. (string value)
8981#cafile = <None>
8982
8983# PEM encoded client certificate cert file (string value)
8984#certfile = <None>
8985
8986# PEM encoded client certificate key file (string value)
8987#keyfile = <None>
8988
8989# Verify HTTPS connections. (boolean value)
8990#insecure = false
8991
8992# Timeout value for http requests (integer value)
8993#timeout = <None>
8994
8995# Authentication type to load (string value)
8996# Deprecated group/name - [service_user]/auth_plugin
8997#auth_type = <None>
8998
8999# Config Section from which to load plugin specific options (string
9000# value)
9001#auth_section = <None>
9002
9003# Authentication URL (string value)
9004#auth_url = <None>
9005
9006# Scope for system operations (string value)
9007#system_scope = <None>
9008
9009# Domain ID to scope to (string value)
9010#domain_id = <None>
9011
9012# Domain name to scope to (string value)
9013#domain_name = <None>
9014
9015# Project ID to scope to (string value)
9016#project_id = <None>
9017
9018# Project name to scope to (string value)
9019#project_name = <None>
9020
9021# Domain ID containing project (string value)
9022#project_domain_id = <None>
9023
9024# Domain name containing project (string value)
9025#project_domain_name = <None>
9026
9027# Trust ID (string value)
9028#trust_id = <None>
9029
9030# Optional domain ID to use with v3 and v2 parameters. It will be used
9031# for both the user and project domain in v3 and ignored in v2
9032# authentication. (string value)
9033#default_domain_id = <None>
9034
9035# Optional domain name to use with v3 API and v2 parameters. It will
9036# be used for both the user and project domain in v3 and ignored in v2
9037# authentication. (string value)
9038#default_domain_name = <None>
9039
9040# User ID (string value)
9041#user_id = <None>
9042
9043# Username (string value)
9044# Deprecated group/name - [service_user]/user_name
9045#username = <None>
9046
9047# User's domain id (string value)
9048#user_domain_id = <None>
9049
9050# User's domain name (string value)
9051#user_domain_name = <None>
9052
9053# User's password (string value)
9054#password = <None>
9055
9056# Tenant ID (string value)
9057#tenant_id = <None>
9058
9059# Tenant Name (string value)
9060#tenant_name = <None>
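#
# Illustrative example of enabling service tokens with Keystone password
# authentication (all values are hypothetical placeholders):
#
# send_service_user_token = true
# auth_type = password
# auth_url = http://keystone.example.com:5000/v3
# username = nova
# password = <nova service password>
# user_domain_name = Default
# project_name = service
# project_domain_name = Default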
9061
9062
9063[spice]
9064#
9065# SPICE console feature allows you to connect to a guest virtual
9066# machine.
9067# SPICE is a replacement for the fairly limited VNC protocol.
9068#
9069# Following requirements must be met in order to use SPICE:
9070#
9071# * Virtualization driver must be libvirt
9072# * spice.enabled set to True
9073# * vnc.enabled set to False
9074# * update html5proxy_base_url
9075# * update server_proxyclient_address
9076
9077#
9078# From nova.conf
9079#
9080
9081#
9082# Enable SPICE related features.
9083#
9084# Related options:
9085#
9086# * VNC must be explicitly disabled to get access to the SPICE
9087# console. Set the
9088# enabled option to False in the [vnc] section to disable the VNC
9089# console.
9090# (boolean value)
9091#enabled = false
9092enabled = false
9093#
9094# Enable the SPICE guest agent support on the instances.
9095#
9096# The Spice agent works with the Spice protocol to offer a better
9097# guest console
9098# experience. However, the Spice console can still be used without the
9099# Spice
9100# Agent. With the Spice agent installed the following features are
9101# enabled:
9102#
9103# * Copy & Paste of text and images between the guest and client
9104# machine
9105# * Automatic adjustment of resolution when the client screen changes
9106# - e.g.
9107# if you make the Spice console full screen the guest resolution
9108# will adjust to
9109# match it rather than letterboxing.
9110# * Better mouse integration - The mouse can be captured and released
9111# without
9112# needing to click inside the console or press keys to release it.
9113# The
9114# performance of mouse movement is also improved.
9115# (boolean value)
9116#agent_enabled = true
9117
9118#
9119# Location of the SPICE HTML5 console proxy.
9120#
9121# End users would use this URL to connect to the ``nova-
9122# spicehtml5proxy``
9123# service. This service will forward requests to the console of an
9124# instance.
9125#
9126# In order to use SPICE console, the service ``nova-spicehtml5proxy``
9127# should be
9128# running. This service is typically launched on the controller node.
9129#
9130# Possible values:
9131#
9132# * Must be a valid URL of the form:
9133# ``http://host:port/spice_auto.html``
9134# where host is the node running ``nova-spicehtml5proxy`` and the
9135# port is
9136# typically 6082. Consider not using the default value as it is not well
9137# defined
9138# for any real deployment.
9139#
9140# Related options:
9141#
9142# * This option depends on ``html5proxy_host`` and ``html5proxy_port``
9143# options.
9144# The access URL returned by the compute node must have the host
9145# and port where the ``nova-spicehtml5proxy`` service is listening.
9146# (uri value)
9147#html5proxy_base_url = http://127.0.0.1:6082/spice_auto.html
9148{%- if controller.vncproxy_url is defined %}
9149html5proxy_base_url = {{ controller.vncproxy_url }}/spice_auto.html
9150{%- endif %}
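#
# For illustration, with ``controller.vncproxy_url`` set to
# ``https://cloud.example.com:6082`` (a hypothetical value), the block above
# would render as:
#
# html5proxy_base_url = https://cloud.example.com:6082/spice_auto.html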
9151
9152#
9153# The address where the SPICE server running on the instances should
9154# listen.
9155#
9156# Typically, the ``nova-spicehtml5proxy`` proxy client runs on the
9157# controller
9158# node and connects over the private network to this address on the
9159# compute
9160# node(s).
9161#
9162# Possible values:
9163#
9164# * IP address to listen on.
9165# (string value)
9166#server_listen = 127.0.0.1
9167
9168#
9169# The address used by ``nova-spicehtml5proxy`` client to connect to
9170# instance
9171# console.
9172#
9173# Typically, the ``nova-spicehtml5proxy`` proxy client runs on the
9174# controller node and connects over the private network to this
9175# address on the
9176# compute node(s).
9177#
9178# Possible values:
9179#
9180# * Any valid IP address on the compute node.
9181#
9182# Related options:
9183#
9184# * This option depends on the ``server_listen`` option.
9185# The proxy client must be able to access the address specified in
9186# ``server_listen`` using the value of this option.
9187# (string value)
9188#server_proxyclient_address = 127.0.0.1
9189
9190#
9191# A keyboard layout which is supported by the underlying hypervisor on
9192# this
9193# node.
9194#
9195# Possible values:
9196# * This is usually an 'IETF language tag' (default is 'en-us'). If
9197# you
9198# use QEMU as hypervisor, you should find the list of supported
9199# keyboard
9200# layouts at /usr/share/qemu/keymaps.
9201# (string value)
9202#keymap = en-us
9203
9204#
9205# IP address or a hostname on which the ``nova-spicehtml5proxy``
9206# service
9207# listens for incoming requests.
9208#
9209# Related options:
9210#
9211# * This option depends on the ``html5proxy_base_url`` option.
9212# The ``nova-spicehtml5proxy`` service must be listening on a host
9213# that is
9214# accessible from the HTML5 client.
9215# (unknown value)
9216#html5proxy_host = 0.0.0.0
9217
9218#
9219# Port on which the ``nova-spicehtml5proxy`` service listens for
9220# incoming
9221# requests.
9222#
9223# Related options:
9224#
9225# * This option depends on the ``html5proxy_base_url`` option.
9226# The ``nova-spicehtml5proxy`` service must be listening on a port
9227# that is
9228# accessible from the HTML5 client.
9229# (port value)
9230# Minimum value: 0
9231# Maximum value: 65535
9232#html5proxy_port = 6082
9233
9234
9235[upgrade_levels]
9236
9237{%- if controller.upgrade_levels is defined %}
9238{%- for key, value in controller.upgrade_levels.iteritems() %}
9239{{ key }}={{ value }}
9240{%- endfor %}
9241{%- endif %}
9242#
9243# upgrade_levels options are used to set version cap for RPC
9244# messages sent between different nova services.
9245#
9246# By default all services send messages using the latest version
9247# they know about.
9248#
9249# The compute upgrade level is an important part of rolling upgrades
9250# where old and new nova-compute services run side by side.
9251#
9252# The other options can largely be ignored, and are only kept to
9253# help with a possible future backport issue.
9254
9255#
9256# From nova.conf
9257#
9258
9259#
9260# Compute RPC API version cap.
9261#
9262# By default, we always send messages using the most recent version
9263# the client knows about.
9264#
9265# Where you have old and new compute services running, you should set
9266# this to the lowest deployed version. This is to guarantee that all
9267# services never send messages that one of the compute nodes can't
9268# understand. Note that we only support upgrading from release N to
9269# release N+1.
9270#
9271# Set this option to "auto" if you want to let the compute RPC module
9272# automatically determine what version to use based on the service
9273# versions in the deployment.
9274#
9275# Possible values:
9276#
9277# * By default send the latest version the client knows about
9278# * 'auto': Automatically determines what version to use based on
9279# the service versions in the deployment.
9280# * A string representing a version number in the format 'N.N';
9281# for example, possible values might be '1.12' or '2.0'.
9282# * An OpenStack release name, in lower case, such as 'mitaka' or
9283# 'liberty'.
9284# (string value)
9285#compute = <None>
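#
# Illustrative examples only: pin to the older release while old and new
# compute services coexist, or let nova determine the cap automatically:
#
# compute = pike
# compute = auto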
9286
9287# Cells RPC API version cap (string value)
9288#cells = <None>
9289
9290# Intercell RPC API version cap (string value)
9291#intercell = <None>
9292
9293# Cert RPC API version cap (string value)
9294#cert = <None>
9295
9296# Scheduler RPC API version cap (string value)
9297#scheduler = <None>
9298
9299# Conductor RPC API version cap (string value)
9300#conductor = <None>
9301
9302# Console RPC API version cap (string value)
9303#console = <None>
9304
9305# Consoleauth RPC API version cap (string value)
9306#consoleauth = <None>
9307
9308# Network RPC API version cap (string value)
9309#network = <None>
9310
9311# Base API RPC API version cap (string value)
9312#baseapi = <None>
9313
9314
9315[vault]
9316
9317#
9318# From nova.conf
9319#
9320
9321# root token for vault (string value)
9322#root_token_id = <None>
9323
9324# Use this endpoint to connect to Vault, for example:
9325# "http://127.0.0.1:8200" (string value)
9326#vault_url = http://127.0.0.1:8200
9327
9328# Absolute path to ca cert file (string value)
9329#ssl_ca_crt_file = <None>
9330
9331# SSL Enabled/Disabled (boolean value)
9332#use_ssl = false
9333
9334
9335[vendordata_dynamic_auth]
9336#
9337# Options within this group control the authentication of the
9338# vendordata
9339# subsystem of the metadata API server (and config drive) with
9340# external systems.
9341
9342#
9343# From nova.conf
9344#
9345
9346# PEM encoded Certificate Authority to use when verifying HTTPs
9347# connections. (string value)
9348#cafile = <None>
9349
9350# PEM encoded client certificate cert file (string value)
9351#certfile = <None>
9352
9353# PEM encoded client certificate key file (string value)
9354#keyfile = <None>
9355
9356# Verify HTTPS connections. (boolean value)
9357#insecure = false
9358
9359# Timeout value for http requests (integer value)
9360#timeout = <None>
9361
9362# Authentication type to load (string value)
9363# Deprecated group/name - [vendordata_dynamic_auth]/auth_plugin
9364#auth_type = <None>
9365
9366# Config Section from which to load plugin specific options (string
9367# value)
9368#auth_section = <None>
9369
9370# Authentication URL (string value)
9371#auth_url = <None>
9372
9373# Scope for system operations (string value)
9374#system_scope = <None>
9375
9376# Domain ID to scope to (string value)
9377#domain_id = <None>
9378
9379# Domain name to scope to (string value)
9380#domain_name = <None>
9381
9382# Project ID to scope to (string value)
9383#project_id = <None>
9384
9385# Project name to scope to (string value)
9386#project_name = <None>
9387
9388# Domain ID containing project (string value)
9389#project_domain_id = <None>
9390
9391# Domain name containing project (string value)
9392#project_domain_name = <None>
9393
9394# Trust ID (string value)
9395#trust_id = <None>
9396
9397# Optional domain ID to use with v3 and v2 parameters. It will be used
9398# for both the user and project domain in v3 and ignored in v2
9399# authentication. (string value)
9400#default_domain_id = <None>
9401
9402# Optional domain name to use with v3 API and v2 parameters. It will
9403# be used for both the user and project domain in v3 and ignored in v2
9404# authentication. (string value)
9405#default_domain_name = <None>
9406
9407# User ID (string value)
9408#user_id = <None>
9409
9410# Username (string value)
9411# Deprecated group/name - [vendordata_dynamic_auth]/user_name
9412#username = <None>
9413
9414# User's domain id (string value)
9415#user_domain_id = <None>
9416
9417# User's domain name (string value)
9418#user_domain_name = <None>
9419
9420# User's password (string value)
9421#password = <None>
9422
9423# Tenant ID (string value)
9424#tenant_id = <None>
9425
9426# Tenant Name (string value)
9427#tenant_name = <None>
9428
9429{%- set compute_driver = controller.get('compute_driver', 'libvirt.LibvirtDriver') %}
9430{%- if compute_driver in compute_driver_mapping.keys() %}
9431{%- set _data = controller.get(compute_driver_mapping[compute_driver]) %}
9432{%- include "nova/files/queens/compute/_" + compute_driver_mapping[compute_driver] + ".conf" %}
9433{%- endif %}
9434
9435[vnc]
9436#
9437# Virtual Network Computer (VNC) can be used to provide remote desktop
9438# console access to instances for tenants and/or administrators.
9439
9440#
9441# From nova.conf
9442#
9443
9444#
9445# Enable VNC related features.
9446#
9447# Guests will get created with graphical devices to support this.
9448# Clients
9449# (for example Horizon) can then establish a VNC connection to the
9450# guest.
9451# (boolean value)
9452# Deprecated group/name - [DEFAULT]/vnc_enabled
9453enabled = true
9454
9455novncproxy_host = {{ controller.bind.get('novncproxy_address', '0.0.0.0') }}
9456novncproxy_base_url = {{ controller.vncproxy_url }}/vnc_auto.html
9457novncproxy_port={{ controller.bind.get('vncproxy_port', '6080') }}
9458{%- if pillar.nova.compute is defined %}
9459vncserver_listen={{ controller.bind.private_address }}
9460vncserver_proxyclient_address={{ controller.bind.private_address }}
9461{%- else %}
9462vncserver_listen={{ controller.bind.get('novncproxy_address', '0.0.0.0') }}
9463{%- endif %}
9464{%- if controller.novncproxy.vencrypt.tls.get('enabled', False) %}
9465auth_schemes=vencrypt
9466vencrypt_client_key={{controller.novncproxy.vencrypt.tls.key_file|yaml_squote}}
9467vencrypt_client_cert={{controller.novncproxy.vencrypt.tls.cert_file|yaml_squote}}
9468vencrypt_ca_certs={{controller.novncproxy.vencrypt.tls.ca_file|yaml_squote}}
9469{%- endif %}
9470
9471#
9472# Keymap for VNC.
9473#
9474# The keyboard mapping (keymap) determines which keyboard layout a VNC
9475# session should use by default.
9476#
9477# Possible values:
9478#
9479# * A keyboard layout which is supported by the underlying hypervisor
9480# on
9481# this node. This is usually an 'IETF language tag' (for example
9482# 'en-us'). If you use QEMU as hypervisor, you should find the
9483# list
9484# of supported keyboard layouts at ``/usr/share/qemu/keymaps``.
9485# (string value)
9486# Deprecated group/name - [DEFAULT]/vnc_keymap
9487keymap = {{ controller.get('vnc_keymap', 'en-us') }}
9488
9489#
9490# The IP address or hostname on which an instance should listen to for
9491# incoming VNC connection requests on this node.
9492# (unknown value)
9493# Deprecated group/name - [DEFAULT]/vncserver_listen
9494# Deprecated group/name - [vnc]/vncserver_listen
9495#server_listen = 127.0.0.1
9496
9497#
9498# Private, internal IP address or hostname of VNC console proxy.
9499#
9500# The VNC proxy is an OpenStack component that enables compute service
9501# users to access their instances through VNC clients.
9502#
9503# This option sets the private address to which proxy clients, such as
9504# ``nova-xvpvncproxy``, should connect.
9505# (unknown value)
9506# Deprecated group/name - [DEFAULT]/vncserver_proxyclient_address
9507# Deprecated group/name - [vnc]/vncserver_proxyclient_address
9508#server_proxyclient_address = 127.0.0.1
9509
9510#
9511# Public address of noVNC VNC console proxy.
9512#
9513# The VNC proxy is an OpenStack component that enables compute service
9514# users to access their instances through VNC clients. noVNC provides
9515# VNC support through a websocket-based client.
9516#
9517# This option sets the public base URL to which client systems will
9518# connect. noVNC clients can use this address to connect to the noVNC
9519# instance and, by extension, the VNC sessions.
9520#
9521# Related options:
9522#
9523# * novncproxy_host
9524# * novncproxy_port
9525# (uri value)
9526#novncproxy_base_url = http://127.0.0.1:6080/vnc_auto.html
9527
9528#
9529# IP address or hostname that the XVP VNC console proxy should bind
9530# to.
9531#
9532# The VNC proxy is an OpenStack component that enables compute service
9533# users to access their instances through VNC clients. Xen provides
9534# the Xenserver VNC Proxy, or XVP, as an alternative to the
9535# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
9536# XVP clients are Java-based.
9537#
9538# This option sets the private address to which the XVP VNC console
9539# proxy
9540# service should bind.
9541#
9542# Related options:
9543#
9544# * xvpvncproxy_port
9545# * xvpvncproxy_base_url
9546# (unknown value)
9547#xvpvncproxy_host = 0.0.0.0
9548
9549#
9550# Port that the XVP VNC console proxy should bind to.
9551#
9552# The VNC proxy is an OpenStack component that enables compute service
9553# users to access their instances through VNC clients. Xen provides
9554# the Xenserver VNC Proxy, or XVP, as an alternative to the
9555# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
9556# XVP clients are Java-based.
9557#
9558# This option sets the private port to which the XVP VNC console proxy
9559# service should bind.
9560#
9561# Related options:
9562#
9563# * xvpvncproxy_host
9564# * xvpvncproxy_base_url
9565# (port value)
9566# Minimum value: 0
9567# Maximum value: 65535
9568#xvpvncproxy_port = 6081
9569
9570#
9571# Public URL address of XVP VNC console proxy.
9572#
9573# The VNC proxy is an OpenStack component that enables compute service
9574# users to access their instances through VNC clients. Xen provides
9575# the Xenserver VNC Proxy, or XVP, as an alternative to the
9576# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
9577# XVP clients are Java-based.
9578#
9579# This option sets the public base URL to which client systems will
9580# connect. XVP clients can use this address to connect to the XVP
9581# instance and, by extension, the VNC sessions.
9582#
9583# Related options:
9584#
9585# * xvpvncproxy_host
9586# * xvpvncproxy_port
9587# (uri value)
9588#xvpvncproxy_base_url = http://127.0.0.1:6081/console
9589
9590#
9591# IP address that the noVNC console proxy should bind to.
9592#
9593# The VNC proxy is an OpenStack component that enables compute service
9594# users to access their instances through VNC clients. noVNC provides
9595# VNC support through a websocket-based client.
9596#
9597# This option sets the private address to which the noVNC console
9598# proxy
9599# service should bind.
9600#
9601# Related options:
9602#
9603# * novncproxy_port
9604# * novncproxy_base_url
9605# (string value)
9606#novncproxy_host = 0.0.0.0
9607
9608#
9609# Port that the noVNC console proxy should bind to.
9610#
9611# The VNC proxy is an OpenStack component that enables compute service
9612# users to access their instances through VNC clients. noVNC provides
9613# VNC support through a websocket-based client.
9614#
9615# This option sets the private port to which the noVNC console proxy
9616# service should bind.
9617#
9618# Related options:
9619#
9620# * novncproxy_host
9621# * novncproxy_base_url
9622# (port value)
9623# Minimum value: 0
9624# Maximum value: 65535
9625#novncproxy_port = 6080
9626
9627#
9628# The authentication schemes to use with the compute node.
9629#
9630# Control what RFB authentication schemes are permitted for
9631# connections between
9632# the proxy and the compute host. If multiple schemes are enabled, the
9633# first
9634# matching scheme will be used, thus the strongest schemes should be
9635# listed
9636# first.
9637#
9638# Possible values:
9639#
9640# * ``none``: allow connection without authentication
9641# * ``vencrypt``: use VeNCrypt authentication scheme
9642#
9643# Related options:
9644#
9645# * ``[vnc]vencrypt_client_key``, ``[vnc]vencrypt_client_cert``: must
9646# also be set
9647# (list value)
9648#auth_schemes = none
9649
9650# The path to the client key PEM file (for x509)
9651#
9652# The fully qualified path to a PEM file containing the private key
9653# which the VNC
9654# proxy server presents to the compute node during VNC authentication.
9655#
9656# Related options:
9657#
9658# * ``vnc.auth_schemes``: must include ``vencrypt``
9659# * ``vnc.vencrypt_client_cert``: must also be set
9660# (string value)
9661#vencrypt_client_key = <None>
9662
9663# The path to the client certificate PEM file (for x509)
9664#
9665# The fully qualified path to a PEM file containing the x509
9666# certificate which
9667# the VNC proxy server presents to the compute node during VNC
9668# authentication.
9669#
9670# Related options:
9671#
9672# * ``vnc.auth_schemes``: must include ``vencrypt``
9673# * ``vnc.vencrypt_client_key``: must also be set
9674# (string value)
9675#vencrypt_client_cert = <None>
9676
9677# The path to the CA certificate PEM file
9678#
9679# The fully qualified path to a PEM file containing one or more x509
9680# certificates
9681# for the certificate authorities used by the compute node VNC server.
9682#
9683# Related options:
9684#
9685# * ``vnc.auth_schemes``: must include ``vencrypt``
9686# (string value)
9687#vencrypt_ca_certs = <None>
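#
# Illustrative example of enabling VeNCrypt between the noVNC proxy and the
# compute nodes (certificate paths are hypothetical placeholders):
#
# auth_schemes = vencrypt
# vencrypt_client_key = /etc/pki/nova-novncproxy/client-key.pem
# vencrypt_client_cert = /etc/pki/nova-novncproxy/client-cert.pem
# vencrypt_ca_certs = /etc/pki/nova-novncproxy/ca-cert.pem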
9688
9689
9690[workarounds]
9691#
9692# A collection of workarounds used to mitigate bugs or issues found in
9693# system
9694# tools (e.g. Libvirt or QEMU) or Nova itself under certain
9695# conditions. These
9696# should only be enabled in exceptional circumstances. All options are
9697# linked
9698# against bug IDs, where more information on the issue can be found.
9699
9700#
9701# From nova.conf
9702#
9703
9704#
9705# Use sudo instead of rootwrap.
9706#
9707# Allow fallback to sudo for performance reasons.
9708#
9709# For more information, refer to the bug report:
9710#
9711# https://bugs.launchpad.net/nova/+bug/1415106
9712#
9713# Possible values:
9714#
9715# * True: Use sudo instead of rootwrap
9716# * False: Use rootwrap as usual
9717#
9718# Interdependencies to other options:
9719#
9720# * Any options that affect 'rootwrap' will be ignored.
9721# (boolean value)
9722#disable_rootwrap = false
9723
9724#
9725# Disable live snapshots when using the libvirt driver.
9726#
9727# Live snapshots allow the snapshot of the disk to happen without an
9728# interruption to the guest, using coordination with a guest agent to
9729# quiesce the filesystem.
9730#
9731# When using libvirt 1.2.2 live snapshots fail intermittently under
9732# load
9733# (likely related to concurrent libvirt/qemu operations). This config
9734# option provides a mechanism to disable live snapshot, in favor of
9735# cold
9736# snapshot, while this is resolved. Cold snapshot causes an instance
9737# outage while the guest is going through the snapshotting process.
9738#
9739# For more information, refer to the bug report:
9740#
9741# https://bugs.launchpad.net/nova/+bug/1334398
9742#
9743# Possible values:
9744#
9745# * True: Live snapshot is disabled when using libvirt
9746# * False: Live snapshots are always used when snapshotting (as long
9747# as
9748# there is a new enough libvirt and the backend storage supports it)
9749# (boolean value)
9750#disable_libvirt_livesnapshot = false
9751disable_libvirt_livesnapshot = {{ controller.get('workaround', {}).get('disable_libvirt_livesnapshot', True)|lower }}
9752
9753#
9754# Enable handling of events emitted from compute drivers.
9755#
9756# Many compute drivers emit lifecycle events, which are events that
9757# occur when,
9758# for example, an instance is starting or stopping. If the instance is
9759# going
9760# through task state changes due to an API operation, like resize, the
9761# events
9762# are ignored.
9763#
9764# This is an advanced feature which allows the hypervisor to signal to
9765# the
9766# compute service that an unexpected state change has occurred in an
9767# instance
9768# and that the instance can be shutdown automatically. Unfortunately,
9769# this can
9770# race in some conditions, for example in reboot operations or when
9771# the compute
9772# service or the host is rebooted (planned or due to an outage). If
9773# such races
9774# are common, then it is advisable to disable this feature.
9775#
9776# Care should be taken when this feature is disabled and
9777# 'sync_power_state_interval' is set to a negative value. In this
9778# case, any
9779# instances that get out of sync between the hypervisor and the Nova
9780# database
9781# will have to be synchronized manually.
9782#
9783# For more information, refer to the bug report:
9784#
9785# https://bugs.launchpad.net/bugs/1444630
9786#
9787# Interdependencies to other options:
9788#
9789# * If ``sync_power_state_interval`` is negative and this feature is
9790# disabled,
9791# then instances that get out of sync between the hypervisor and the
9792# Nova
9793# database will have to be synchronized manually.
9794# (boolean value)
9795#handle_virt_lifecycle_events = true
9796
9797#
9798# Disable the server group policy check upcall in compute.
9799#
9800# In order to detect races with server group affinity policy, the
9801# compute
9802# service attempts to validate that the policy was not violated by the
9803# scheduler. It does this by making an upcall to the API database to
9804# list
9805# the instances in the server group for one that it is booting, which
9806# violates
9807# our api/cell isolation goals. Eventually this will be solved by
9808# proper affinity
9809# guarantees in the scheduler and placement service, but until then,
9810# this late
9811# check is needed to ensure proper affinity policy.
9812#
9813# Operators that desire api/cell isolation over this check should
9814# enable this flag, which will avoid making that upcall from compute.
9815#
9816# Related options:
9817#
9818# * [filter_scheduler]/track_instance_changes also relies on upcalls
9819# from the
9820# compute service to the scheduler service.
9821# (boolean value)
9822#disable_group_policy_check_upcall = false
9823
9824
9825[wsgi]
9826#
9827# Options under this group are used to configure WSGI (Web Server
9828# Gateway
9829# Interface). WSGI is used to serve API requests.
9830
9831#
9832# From nova.conf
9833#
9834
9835#
9836# This option represents a file name for the paste.deploy config for
9837# nova-api.
9838#
9839# Possible values:
9840#
9841# * A string representing file name for the paste.deploy config.
9842# (string value)
9843api_paste_config = /etc/nova/api-paste.ini
9844
9845# DEPRECATED:
9846# It represents a python format string that is used as the template to
9847# generate
9848# log lines. The following values can be formatted into it: client_ip,
9849# date_time, request_line, status_code, body_length, wall_seconds.
9850#
9851# This option is used for building custom request loglines when
9852# running
9853# nova-api under eventlet. If used under uwsgi or apache, this option
9854# has no effect.
9855#
9856# Possible values:
9857#
9858# * '%(client_ip)s "%(request_line)s" status: %(status_code)s'
9859# 'len: %(body_length)s time: %(wall_seconds).7f' (default)
9860# * Any formatted string formed by specific values.
9861# (string value)
9862# This option is deprecated for removal since 16.0.0.
9863# Its value may be silently ignored in the future.
9864# Reason:
9865# This option only works when running nova-api under eventlet, and
9866# encodes very eventlet specific pieces of information. Starting in
9867# Pike
9868# the preferred model for running nova-api is under uwsgi or apache
9869# mod_wsgi.
9870#wsgi_log_format = %(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f
9871
9872#
9873# This option specifies the HTTP header used to determine the protocol
9874# scheme
9875# for the original request, even if it was removed by a SSL
9876# terminating proxy.
9877#
9878# Possible values:
9879#
9880# * None (default) - the request scheme is not influenced by any HTTP
9881# headers
9882# * Valid HTTP header, like HTTP_X_FORWARDED_PROTO
9883#
9884# WARNING: Do not set this unless you know what you are doing.
9885#
9886# Make sure ALL of the following are true before setting this
9887# (assuming the
9888# values from the example above):
9889# * Your API is behind a proxy.
9890# * Your proxy strips the X-Forwarded-Proto header from all incoming
9891# requests.
9892# In other words, if end users include that header in their
9893# requests, the proxy
9894# will discard it.
9895# * Your proxy sets the X-Forwarded-Proto header and sends it to API,
9896# but only
9897# for requests that originally come in via HTTPS.
9898#
9899# If any of those are not true, you should keep this setting set to
9900# None.
9901#
9902# (string value)
9903#secure_proxy_ssl_header = <None>
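#
# Illustrative example for an API behind an SSL-terminating proxy that sets
# X-Forwarded-Proto; only set this if all of the conditions above are met:
#
# secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO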
9904
9905#
9906# This option allows setting path to the CA certificate file that
9907# should be used
9908# to verify connecting clients.
9909#
9910# Possible values:
9911#
9912# * String representing path to the CA certificate file.
9913#
9914# Related options:
9915#
9916# * enabled_ssl_apis
9917# (string value)
9918#ssl_ca_file = <None>
9919
9920#
9921# This option allows setting path to the SSL certificate of API
9922# server.
9923#
9924# Possible values:
9925#
9926# * String representing path to the SSL certificate.
9927#
9928# Related options:
9929#
9930# * enabled_ssl_apis
9931# (string value)
9932#ssl_cert_file = <None>
9933
9934#
9935# This option specifies the path to the file where SSL private key of
9936# API
9937# server is stored when SSL is in effect.
9938#
9939# Possible values:
9940#
9941# * String representing path to the SSL private key.
9942#
9943# Related options:
9944#
9945# * enabled_ssl_apis
9946# (string value)
9947#ssl_key_file = <None>
9948
9949#
9950# This option sets the value of TCP_KEEPIDLE in seconds for each
9951# server socket.
9952# It specifies the duration of time to keep connection active. TCP
9953# generates a
9954# KEEPALIVE transmission for an application that requests to keep
9955# connection
9956# active. Not supported on OS X.
9957#
9958# Related options:
9959#
9960# * keep_alive
9961# (integer value)
9962# Minimum value: 0
9963#tcp_keepidle = 600
9964
9965#
9966# This option specifies the size of the pool of greenthreads used by
9967# wsgi.
9968# It is possible to limit the number of concurrent connections using
9969# this
9970# option.
9971# (integer value)
9972# Minimum value: 0
9973# Deprecated group/name - [DEFAULT]/wsgi_default_pool_size
9974#default_pool_size = 1000
9975
9976#
9977# This option specifies the maximum line size of message headers to be
9978# accepted.
9979# max_header_line may need to be increased when using large tokens
9980# (typically
9981# those generated by the Keystone v3 API with big service catalogs).
9982#
9983# Since TCP is a stream based protocol, in order to reuse a
9984# connection, HTTP
9985# has to have a way to indicate the end of the previous response and
9986# beginning
9987# of the next. Hence, in a keep_alive case, all messages must have a
9988# self-defined message length.
9989# (integer value)
9990# Minimum value: 0
9991#max_header_line = 16384
9992
9993#
9994# This option allows using the same TCP connection to send and receive
9995# multiple
9996# HTTP requests/responses, as opposed to opening a new one for every
9997# single
9998# request/response pair. HTTP keep-alive indicates HTTP connection
9999# reuse.
10000#
10001# Possible values:
10002#
10003# * True : reuse HTTP connection.
10004# * False : closes the client socket connection explicitly.
10005#
10006# Related options:
10007#
10008# * tcp_keepidle
10009# (boolean value)
10010# Deprecated group/name - [DEFAULT]/wsgi_keep_alive
10011#keep_alive = true
10012
10013#
10014# This option specifies the timeout for client connections' socket
10015# operations.
10016# If an incoming connection is idle for this number of seconds it will
10017# be
10018# closed. It indicates timeout on individual read/writes on the socket
10019# connection. To wait forever set to 0.
10020# (integer value)
10021# Minimum value: 0
10022#client_socket_timeout = 900
10023
10024
10025[xenserver]
10026#
10027# XenServer options are used when the compute_driver is set to use
10028# XenServer (compute_driver=xenapi.XenAPIDriver).
10029#
10030# Must specify connection_url, connection_password and
10031# ovs_integration_bridge to
10032# use compute_driver=xenapi.XenAPIDriver.
10033
10034#
10035# From nova.conf
10036#
10037
10038#
10039# Number of seconds to wait for agent's reply to a request.
10040#
10041# Nova configures/performs certain administrative actions on a server
10042# with the
10043# help of an agent that's installed on the server. The communication
10044# between
10045# Nova and the agent is achieved via sharing messages, called records,
10046# over
10047# xenstore, a shared storage across all the domains on a Xenserver
10048# host.
10049# Operations performed by the agent on behalf of nova are: 'version',
10050# 'key_init',
10051# 'password', 'resetnetwork', 'inject_file', and 'agentupdate'.
10052#
10053# To perform one of the above operations, the xapi 'agent' plugin
10054# writes the
10055# command and its associated parameters to a certain location known to
10056# the domain
10057# and awaits response. On being notified of the message, the agent
10058# performs
10059# appropriate actions on the server and writes the result back to
10060# xenstore. This
10061# result is then read by the xapi 'agent' plugin to determine the
10062# success/failure
10063# of the operation.
10064#
10065# This config option determines how long the xapi 'agent' plugin shall
10066# wait to
10067# read the response off of xenstore for a given request/command. If
10068# the agent on
10069# the instance fails to write the result in this time period, the
10070# operation is
10071# considered to have timed out.
10072#
10073# Related options:
10074#
10075# * ``agent_version_timeout``
10076# * ``agent_resetnetwork_timeout``
10077#
10078# (integer value)
10079# Minimum value: 0
10080#agent_timeout = 30
10081
10082#
10083# Number of seconds to wait for agent's reply to version request.
10084#
10085# This indicates the amount of time xapi 'agent' plugin waits for the
10086# agent to
10087# respond to the 'version' request specifically. The generic timeout
10088# for agent
10089# communication ``agent_timeout`` is ignored in this case.
10090#
10091# During the build process the 'version' request is used to determine
10092# if the
10093# agent is available/operational to perform other requests such as
10094# 'resetnetwork', 'password', 'key_init' and 'inject_file'. If the
10095# 'version' call
10096# fails, the other configuration is skipped. So, this configuration
10097# option can
10098# also be interpreted as time in which agent is expected to be fully
10099# operational.
10100# (integer value)
10101# Minimum value: 0
10102#agent_version_timeout = 300
10103
10104#
10105# Number of seconds to wait for agent's reply to resetnetwork
10106# request.
10107#
10108# This indicates the amount of time xapi 'agent' plugin waits for the
10109# agent to
10110# respond to the 'resetnetwork' request specifically. The generic
10111# timeout for
10112# agent communication ``agent_timeout`` is ignored in this case.
10113# (integer value)
10114# Minimum value: 0
10115#agent_resetnetwork_timeout = 60
10116
10117#
10118# Path to locate guest agent on the server.
10119#
10120# Specifies the path in which the XenAPI guest agent should be
10121# located. If the
10122# agent is present, network configuration is not injected into the
10123# image.
10124#
10125# Related options:
10126#
10127# For this option to have an effect:
10128# * ``flat_injected`` should be set to ``True``
10129# * ``compute_driver`` should be set to ``xenapi.XenAPIDriver``
10130#
10131# (string value)
10132#agent_path = usr/sbin/xe-update-networking
10133
10134#
10135# Disables the use of XenAPI agent.
10136#
10137# This configuration option suggests whether the use of agent should
10138# be enabled
10139# or not regardless of what image properties are present. Image
10140# properties have
10141# an effect only when this is set to ``True``. Read description of
10142# config option
10143# ``use_agent_default`` for more information.
10144#
10145# Related options:
10146#
10147# * ``use_agent_default``
10148#
10149# (boolean value)
10150#disable_agent = false
10151
10152#
10153# Whether or not to use the agent by default when its usage is enabled
10154# but not
10155# indicated by the image.
10156#
10157# The use of XenAPI agent can be disabled altogether using the
10158# configuration
10159# option ``disable_agent``. However, if it is not disabled, the use of
10160# an agent
10161# can still be controlled by the image in use through one of its
10162# properties,
10163# ``xenapi_use_agent``. If this property is either not present or
10164# specified
10165# incorrectly on the image, the use of agent is determined by this
10166# configuration
10167# option.
10168#
10169# Note that if this configuration is set to ``True`` when the agent is
10170# not
10171# present, the boot times will increase significantly.
10172#
10173# Related options:
10174#
10175# * ``disable_agent``
10176#
10177# (boolean value)
10178#use_agent_default = false
10179
10180# Timeout in seconds for XenAPI login. (integer value)
10181# Minimum value: 0
10182#login_timeout = 10
10183
10184#
10185# Maximum number of concurrent XenAPI connections.
10186#
10187# In nova, multiple XenAPI requests can happen at a time.
10188# Configuring this option will parallelize access to the XenAPI
10189# session, which allows you to make concurrent XenAPI connections.
10190# (integer value)
10191# Minimum value: 1
10192#connection_concurrent = 5
10193
10194#
10195# Cache glance images locally.
10196#
10197# The value for this option must be chosen from the choices listed
10198# here. Configuring a value other than these will default to 'all'.
10199#
10200# Note: There is nothing that deletes these images.
10201#
10202# Possible values:
10203#
10204# * `all`: will cache all images.
10205# * `some`: will only cache images that have the
10206# image_property `cache_in_nova=True`.
10207# * `none`: turns off caching entirely.
10208# (string value)
10209# Possible values:
10210# all - <No description provided>
10211# some - <No description provided>
10212# none - <No description provided>
10213#cache_images = all
10214
10215#
10216# Compression level for images.
10217#
10218# By setting this option we can configure the gzip compression level.
10219# This option sets the GZIP environment variable before spawning tar -cz
10220# to force the compression level. It defaults to none, which means the
10221# GZIP environment variable is not set and the default (usually -6)
10222# is used.
10223#
10224# Possible values:
10225#
10226# * Range is 1-9, e.g., 9 for gzip -9, 9 being most
10227# compressed but most CPU intensive on dom0.
10228# * Any values out of this range will default to None.
10229# (integer value)
10230# Minimum value: 1
10231# Maximum value: 9
10232#image_compression_level = <None>
10233
10234# Default OS type used when uploading an image to glance (string
10235# value)
10236#default_os_type = linux
10237
10238# Time in secs to wait for a block device to be created (integer
10239# value)
10240# Minimum value: 1
10241#block_device_creation_timeout = 10
10242{%- if controller.block_device_creation_timeout is defined %}
10243block_device_creation_timeout = {{ controller.block_device_creation_timeout }}
10244{%- endif %}
10245
10246#
10247# Maximum size in bytes of kernel or ramdisk images.
10248#
10249# Specifying the maximum size of kernel or ramdisk will avoid copying
10250# large files to dom0 and fill up /boot/guest.
10251# (integer value)
10252#max_kernel_ramdisk_size = 16777216
10253
10254#
10255# Filter for finding the SR to be used to install guest instances on.
10256#
10257# Possible values:
10258#
10259# * To use the Local Storage in default XenServer/XCP installations
10260# set this flag to other-config:i18n-key=local-storage.
10261# * To select an SR with a different matching criteria, you could
10262# set it to other-config:my_favorite_sr=true.
10263# * To fall back on the Default SR, as displayed by XenCenter,
10264# set this flag to: default-sr:true.
10265# (string value)
10266#sr_matching_filter = default-sr:true
10267
10268#
10269# Whether to use sparse_copy for copying data on a resize down.
10270# (False will use standard dd). This speeds up resizes down
10271# considerably since large runs of zeros won't have to be rsynced.
10272# (boolean value)
10273#sparse_copy = true
10274
10275#
10276# Maximum number of retries to unplug VBD.
10277# If set to 0, should try once, no retries.
10278# (integer value)
10279# Minimum value: 0
10280#num_vbd_unplug_retries = 10
10281
10282#
10283# Name of network to use for booting iPXE ISOs.
10284#
10285# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
10286# This feature gives a means to roll your own image.
10287#
10288# By default this option is not set. Enable this option to
10289# boot an iPXE ISO.
10290#
10291# Related Options:
10292#
10293# * `ipxe_boot_menu_url`
10294# * `ipxe_mkisofs_cmd`
10295# (string value)
10296#ipxe_network_name = <None>
10297
10298#
10299# URL to the iPXE boot menu.
10300#
10301# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
10302# This feature gives a means to roll your own image.
10303#
10304# By default this option is not set. Enable this option to
10305# boot an iPXE ISO.
10306#
10307# Related Options:
10308#
10309# * `ipxe_network_name`
10310# * `ipxe_mkisofs_cmd`
10311# (string value)
10312#ipxe_boot_menu_url = <None>
10313
10314#
10315# Name and optionally path of the tool used for ISO image creation.
10316#
10317# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
10318# This feature gives a means to roll your own image.
10319#
10320# Note: By default `mkisofs` is not present in the Dom0, so the
10321# package must either be added to Dom0 manually, or the `mkisofs`
10322# binary must be included in the image itself.
10323#
10324# Related Options:
10325#
10326# * `ipxe_network_name`
10327# * `ipxe_boot_menu_url`
10328# (string value)
10329#ipxe_mkisofs_cmd = mkisofs
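#
# Illustrative example only: booting an iPXE ISO normally means setting the
# three related options above together. The network name and URL below are
# placeholders, not defaults:
#
#   ipxe_network_name = ipxe-boot-net
#   ipxe_boot_menu_url = http://192.0.2.1/boot.ipxe
#   ipxe_mkisofs_cmd = mkisofs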
10330
10331#
10332# URL for connection to XenServer/Xen Cloud Platform. A special value
10333# of unix://local can be used to connect to the local unix socket.
10334#
10335# Possible values:
10336#
10337# * Any string that represents a URL. The connection_url is
10338# generally the management network IP address of the XenServer.
10339# * This option must be set if you chose the XenServer driver.
10340# (string value)
10341#connection_url = <None>
10342
10343# Username for connection to XenServer/Xen Cloud Platform (string
10344# value)
10345#connection_username = root
10346
10347# Password for connection to XenServer/Xen Cloud Platform (string
10348# value)
10349#connection_password = <None>
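#
# Illustrative example only: when the XenServer driver is used, the three
# connection options above are normally set together. The address and
# password below are placeholders:
#
#   connection_url = https://192.0.2.10
#   connection_username = root
#   connection_password = changeme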
10350
10351#
10352# The interval used for polling of coalescing vhds.
10353#
10354# This is the interval after which the VHD coalesce task is
10355# performed, until it reaches the max attempts that is set by
10356# vhd_coalesce_max_attempts.
10357#
10358# Related options:
10359#
10360# * `vhd_coalesce_max_attempts`
10361# (floating point value)
10362# Minimum value: 0
10363#vhd_coalesce_poll_interval = 5.0
10364
10365#
10366# Ensure compute service is running on host XenAPI connects to.
10367# This option must be set to false if the 'independent_compute'
10368# option is set to true.
10369#
10370# Possible values:
10371#
10372# * Setting this option to true will make sure that compute service
10373# is running on the same host that is specified by connection_url.
10374# * Setting this option to false skips the check.
10375#
10376# Related options:
10377#
10378# * `independent_compute`
10379# (boolean value)
10380#check_host = true
10381
10382#
10383# Max number of times to poll for VHD to coalesce.
10384#
10385# This option determines the maximum number of attempts that can be
10386# made for coalescing the VHD before giving up.
10387#
10388# Related options:
10389#
10390# * `vhd_coalesce_poll_interval`
10391# (integer value)
10392# Minimum value: 0
10393#vhd_coalesce_max_attempts = 20
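#
# With the defaults above, the coalesce task is polled every 5.0 seconds
# for at most 20 attempts, i.e. roughly 100 seconds before giving up.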
10394
10395# Base path to the storage repository on the XenServer host. (string
10396# value)
10397#sr_base_path = /var/run/sr-mount
10398
10399#
10400# The iSCSI Target Host.
10401#
10402# This option represents the hostname or ip of the iSCSI Target.
10403# If the target host is not present in the connection information from
10404# the volume provider then the value from this option is taken.
10405#
10406# Possible values:
10407#
10408# * Any string that represents hostname/ip of Target.
10409# (unknown value)
10410#target_host = <None>
10411
10412#
10413# The iSCSI Target Port.
10414#
10415# This option represents the port of the iSCSI Target. If the
10416# target port is not present in the connection information from the
10417# volume provider then the value from this option is taken.
10418# (port value)
10419# Minimum value: 0
10420# Maximum value: 65535
10421#target_port = 3260
10422
10423#
10424# Used to prevent attempts to attach VBDs locally, so Nova can
10425# be run in a VM on a different host.
10426#
10427# Related options:
10428#
10429# * ``CONF.flat_injected`` (Must be False)
10430# * ``CONF.xenserver.check_host`` (Must be False)
10431# * ``CONF.default_ephemeral_format`` (Must be unset or 'ext3')
10432# * Joining host aggregates (will error if attempted)
10433# * Swap disks for Windows VMs (will error if attempted)
10434# * Nova-based auto_configure_disk (will error if attempted)
10435# (boolean value)
10436#independent_compute = false
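#
# A minimal sketch of the combination described above, for running
# nova-compute in a VM separate from the host it manages (section names
# follow the CONF paths listed in the related options):
#
#   [DEFAULT]
#   flat_injected = false
#   [xenserver]
#   check_host = false
#   independent_compute = true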
10437
10438#
10439# Wait time for instances to go to running state.
10440#
10441# Provide an integer value representing time in seconds to set the
10442# wait time for an instance to go to running state.
10443#
10444# When a request to create an instance is received by nova-api and
10445# communicated to nova-compute, the creation of the instance occurs
10446# through interaction with Xen via XenAPI in the compute node. Once
10447# the node on which the instance(s) are to be launched is decided by
10448# nova-scheduler and the launch is triggered, a certain amount of wait
10449# time is involved until the instance(s) can become available and
10450# 'running'. This wait time is defined by running_timeout. If the
10451# instances do not go to running state within this specified wait
10452# time, the launch expires and the instance(s) are set to 'error'
10453# state.
10454# (integer value)
10455# Minimum value: 0
10456#running_timeout = 60
10457
10458# DEPRECATED:
10459# The XenAPI VIF driver using XenServer Network APIs.
10460#
10461# Provide a string value representing the VIF XenAPI vif driver to use
10462# for
10463# plugging virtual network interfaces.
10464#
10465# Xen configuration uses bridging within the backend domain to allow
10466# all VMs to appear on the network as individual hosts. Bridge
10467# interfaces are used to create a XenServer VLAN network in which
10468# the VIFs for the VM instances are plugged. If no VIF bridge driver
10469# is plugged, the bridge is not made available. This configuration
10470# option takes in a value for the VIF driver.
10471#
10472# Possible values:
10473#
10474# * nova.virt.xenapi.vif.XenAPIOpenVswitchDriver (default)
10475# * nova.virt.xenapi.vif.XenAPIBridgeDriver (deprecated)
10476#
10477# Related options:
10478#
10479# * ``vlan_interface``
10480# * ``ovs_integration_bridge``
10481# (string value)
10482# This option is deprecated for removal since 15.0.0.
10483# Its value may be silently ignored in the future.
10484# Reason:
10485# There are only two in-tree vif drivers for XenServer.
10486# XenAPIBridgeDriver is for
10487# nova-network which is deprecated and XenAPIOpenVswitchDriver is for
10488# Neutron
10489# which is the default configuration for Nova since the 15.0.0 Ocata
10490# release. In
10491# the future the "use_neutron" configuration option will be used to
10492# determine
10493# which vif driver to use.
10494#vif_driver = nova.virt.xenapi.vif.XenAPIOpenVswitchDriver
10495
10496#
10497# Dom0 plugin driver used to handle image uploads.
10498#
10499# Provide a string value representing a plugin driver required to
10500# handle the image uploading to GlanceStore.
10501#
10502# Images and snapshots from XenServer need to be uploaded to the data
10503# store for use. image_upload_handler takes in a value for the Dom0
10504# plugin driver. This driver is then called to upload images to the
10505# GlanceStore.
10506# (string value)
10507#image_upload_handler = nova.virt.xenapi.image.glance.GlanceStore
10508
10509#
10510# Number of seconds to wait for SR to settle if the VDI
10511# does not exist when first introduced.
10512#
10513# Some SRs, particularly iSCSI connections, are slow to see the VDIs
10514# right after they are introduced. Setting this option to a
10515# time interval will make the SR wait for that time period
10516# before raising a 'VDI not found' exception.
10517# (integer value)
10518# Minimum value: 0
10519#introduce_vdi_retry_wait = 20
10520
10521#
10522# The name of the integration Bridge that is used with xenapi
10523# when connecting with Open vSwitch.
10524#
10525# Note: The value of this config option is dependent on the
10526# environment, therefore this configuration value must be set
10527# accordingly if you are using XenAPI.
10528#
10529# Possible values:
10530#
10531# * Any string that represents a bridge name.
10532# (string value)
10533#ovs_integration_bridge = <None>
10534
10535#
10536# When adding a new host to a pool, this will append a --force flag to
10537# the
10538# command, forcing hosts to join a pool, even if they have different
10539# CPUs.
10540#
10541# Since XenServer version 5.6 it is possible to create a pool of hosts
10542# that have
10543# different CPU capabilities. To accommodate CPU differences,
10544# XenServer limited the
10545# features it uses to determine CPU compatibility to only the ones
10546# that are
10547# exposed by the CPU, and support for CPU masking was added.
10548# Despite this effort to level differences between CPUs, it is still
10549# possible
10550# that adding a new host will fail, thus the option to force join was
10551# introduced.
10552# (boolean value)
10553#use_join_force = true
10554
10555#
10556# Publicly visible name for this console host.
10557#
10558# Possible values:
10559#
10560# * Current hostname (default) or any string representing hostname.
10561# (string value)
10562#console_public_hostname = <current_hostname>
10563
10564
10565[xvp]
10566#
10567# Configuration options for XVP.
10568#
10569# xvp (Xen VNC Proxy) is a proxy server providing password-protected
10570# VNC-based
10571# access to the consoles of virtual machines hosted on Citrix
10572# XenServer.
10573
10574#
10575# From nova.conf
10576#
10577
10578# XVP conf template (string value)
10579#console_xvp_conf_template = $pybasedir/nova/console/xvp.conf.template
10580
10581# Generated XVP conf file (string value)
10582#console_xvp_conf = /etc/xvp.conf
10583
10584# XVP master process pid file (string value)
10585#console_xvp_pid = /var/run/xvp.pid
10586
10587# XVP log file (string value)
10588#console_xvp_log = /var/log/xvp.log
10589
10590# Port for XVP to multiplex VNC connections on (port value)
10591# Minimum value: 0
10592# Maximum value: 65535
10593#console_xvp_multiplex_port = 5900
10594
10595[matchmaker_redis]
10596{#- include "oslo_templates/oslo/_matchmaker_redis.conf" #}
10597
10598[oslo_messaging_notifications]
10599{%- set _data = controller.notification %}
10600{%- include "oslo_templates/files/queens/oslo/messaging/_notifications.conf" %}
10601
10602{%- if controller.message_queue is defined %}
10603{%- set _data = controller.message_queue %}
10604{%- if _data.engine == 'rabbitmq' %}
10605 {%- set messaging_engine = 'rabbit' %}
10606{%- else %}
10607 {%- set messaging_engine = _data.engine %}
10608{%- endif %}
10609[oslo_messaging_{{ messaging_engine }}]
10610{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': controller.cacert_file}) %}{% endif %}
10611{%- include "oslo_templates/files/queens/oslo/messaging/_" + messaging_engine + ".conf" %}
10612{%- endif %}
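{#-
  Illustrative pillar sketch only (keys assumed from this formula's
  controller context): an engine of rabbitmq makes the include above render
  an [oslo_messaging_rabbit] section; if an ssl mapping is present without
  a cacert_file key, controller.cacert_file is filled in for it.

  nova:
    controller:
      message_queue:
        engine: rabbitmq
-#}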
10613
10614[oslo_policy]
10615{%- if controller.policy is defined %}
10616{%- set _data = controller.policy %}
10617{%- include "oslo_templates/files/queens/oslo/_policy.conf" %}
10618{%- endif %}
10619
10620[database]
10621{%- set _data = controller.database %}
10622{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': controller.cacert_file}) %}{% endif %}
10623{%- include "oslo_templates/files/queens/oslo/_database.conf" %}
10624
10625[oslo_middleware]
10626{%- set _data = controller %}
10627{%- include "oslo_templates/files/queens/oslo/_middleware.conf" %}
10628
10629[keystone_authtoken]
10630{%- set _data = controller.identity %}
10631{%- if 'cacert_file' not in _data.keys() %}{% do _data.update({'cacert_file': controller.cacert_file}) %}{% endif %}
10632{%- set auth_type = _data.get('auth_type', 'password') %}
10633{%- if controller.get('cache',{}).members is defined and 'cache' not in _data.keys() %}
10634{% do _data.update({'cache': controller.cache}) %}
10635{%- endif %}
10636{%- include "oslo_templates/files/queens/keystonemiddleware/_auth_token.conf" %}
10637{%- include "oslo_templates/files/queens/keystoneauth/_type_" + auth_type + ".conf" %}
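{#-
  Illustrative pillar sketch only (keys assumed from this formula's
  controller context): the [keystone_authtoken] block above is driven by
  nova:controller:identity (auth_type defaults to 'password'), and the
  optional cache settings are taken from nova:controller:cache when it has
  a members list; the member layout shown here is an assumption:

  nova:
    controller:
      identity:
        auth_type: password
      cache:
        members:
        - host: 192.0.2.30
          port: 11211
-#}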