| {%- from "nova/map.jinja" import controller,compute_driver_mapping with context %} |
| |
| {%- set connection_x509_ssl_option = '' %} |
| {%- if controller.database.get('x509',{}).get('enabled',False) %} |
| {%- set connection_x509_ssl_option = '&ssl_ca=' ~ controller.database.x509.ca_file ~ '&ssl_cert=' ~ controller.database.x509.cert_file ~ '&ssl_key=' ~ controller.database.x509.key_file %} |
| {%- elif controller.database.get('ssl',{}).get('enabled',False) %} |
| {%- set connection_x509_ssl_option = '&ssl_ca=' ~ controller.database.ssl.get('cacert_file', controller.cacert_file) %} |
| {%- endif %} |
| |
| [DEFAULT] |
| # |
| # From nova.conf |
| # |
| image_service=nova.image.glance.GlanceImageService |
| |
| # DEPRECATED: |
| # When returning instance metadata, this is the class that is used |
| # for getting vendor metadata when that class isn't specified in the individual |
| # request. The value should be the full dot-separated path to the class to use. |
| # |
| # Possible values: |
| # |
| # * Any valid dot-separated class path that can be imported. |
| # (string value) |
| # This option is deprecated for removal since 13.0.0. |
| # Its value may be silently ignored in the future. |
| #vendordata_driver=nova.api.metadata.vendordata_json.JsonFileVendorData |
| |
| # DEPRECATED: |
| # This option is used to enable or disable quota checking for tenant networks. |
| # |
| # Related options: |
| # |
| # * quota_networks |
| # (boolean value) |
| # This option is deprecated for removal since 14.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # CRUD operations on tenant networks are only available when using nova-network |
| # and nova-network is itself deprecated. |
| #enable_network_quota=false |
| |
| # DEPRECATED: |
| # This option controls the number of private networks that can be created per |
| # project (or per tenant). |
| # |
| # Related options: |
| # |
| # * enable_network_quota |
| # (integer value) |
| # Minimum value: 0 |
| # This option is deprecated for removal since 14.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # CRUD operations on tenant networks are only available when using nova-network |
| # and nova-network is itself deprecated. |
| #quota_networks=3 |
| |
| # |
| # This option specifies the name of the availability zone for the |
| # internal services. Services like nova-scheduler, nova-network, |
| # nova-conductor are internal services. These services will appear in |
| # their own internal availability_zone. |
| # |
| # Possible values: |
| # |
| # * Any string representing an availability zone name |
| # * 'internal' is the default value |
| # |
| # (string value) |
| #internal_service_availability_zone=internal |
| |
| # |
| # Default compute node availability_zone. |
| # |
| # This option determines the availability zone to be used when it is not |
| # specified in the VM creation request. If this option is not set, |
| # the default availability zone 'nova' is used. |
| # |
| # Possible values: |
| # |
| # * Any string representing an availability zone name |
| # * 'nova' is the default value |
| # |
| # (string value) |
| #default_availability_zone=nova |
| |
| # Length of generated instance admin passwords. (integer value) |
| # Minimum value: 0 |
| #password_length=12 |
| |
| # |
| # Time period to generate instance usages for. It is possible to define optional |
| # offset to given period by appending @ character followed by a number defining |
| # offset. |
| # |
| # Possible values: |
| # |
# * period, example: ``hour``, ``day``, ``month`` or ``year``
| # * period with offset, example: ``month@15`` will result in monthly audits |
| # starting on 15th day of month. |
| # (string value) |
| #instance_usage_audit_period=month |
{%- if controller.instance_usage_audit_period is defined %}
| instance_usage_audit_period = {{ controller.instance_usage_audit_period }} |
| {%- endif %} |
| |
| # |
| # Start and use a daemon that can run the commands that need to be run with |
| # root privileges. This option is usually enabled on nodes that run nova compute |
| # processes. |
| # (boolean value) |
| #use_rootwrap_daemon=false |
| |
| # |
| # Path to the rootwrap configuration file. |
| # |
| # Goal of the root wrapper is to allow a service-specific unprivileged user to |
| # run a number of actions as the root user in the safest manner possible. |
| # The configuration file used here must match the one defined in the sudoers |
| # entry. |
| # (string value) |
| #rootwrap_config=/etc/nova/rootwrap.conf |
| rootwrap_config=/etc/nova/rootwrap.conf |
| |
| # Explicitly specify the temporary working directory. (string value) |
| #tempdir=<None> |
| |
| # |
| # Determine if monkey patching should be applied. |
| # |
| # Related options: |
| # |
| # * ``monkey_patch_modules``: This must have values set for this option to |
| # have any effect |
| # (boolean value) |
| #monkey_patch=false |
| |
| # |
| # List of modules/decorators to monkey patch. |
| # |
| # This option allows you to patch a decorator for all functions in specified |
| # modules. |
| # |
| # Possible values: |
| # |
| # * nova.compute.api:nova.notifications.notify_decorator |
| # * nova.api.ec2.cloud:nova.notifications.notify_decorator |
| # * [...] |
| # |
| # Related options: |
| # |
| # * ``monkey_patch``: This must be set to ``True`` for this option to |
| # have any effect |
| # (list value) |
| #monkey_patch_modules=nova.compute.api:nova.notifications.notify_decorator |
| |
| # |
| # Defines which driver to use for controlling virtualization. |
| # |
| # Possible values: |
| # |
| # * ``libvirt.LibvirtDriver`` |
| # * ``xenapi.XenAPIDriver`` |
| # * ``fake.FakeDriver`` |
| # * ``ironic.IronicDriver`` |
| # * ``vmwareapi.VMwareVCDriver`` |
| # * ``hyperv.HyperVDriver`` |
| # (string value) |
| #compute_driver=<None> |
| compute_driver = {{ controller.get('compute_driver', 'libvirt.LibvirtDriver') }} |
| |
| # |
| # Allow destination machine to match source for resize. Useful when |
| # testing in single-host environments. By default it is not allowed |
| # to resize to the same host. Setting this option to true will add |
| # the same host to the destination options. |
| # (boolean value) |
| #allow_resize_to_same_host=false |
| allow_resize_to_same_host=true |
| |
| # |
| # Availability zone to use when user doesn't specify one. |
| # |
| # This option is used by the scheduler to determine which availability |
| # zone to place a new VM instance into if the user did not specify one |
| # at the time of VM boot request. |
| # |
| # Possible values: |
| # |
| # * Any string representing an availability zone name |
| # * Default value is None. |
| # (string value) |
| {%- if controller.default_schedule_zone is defined %} |
| default_schedule_zone = {{ controller.default_schedule_zone }} |
| {%- endif %} |
| |
| # |
| # Image properties that should not be inherited from the instance |
| # when taking a snapshot. |
| # |
| # This option gives an opportunity to select which image-properties |
| # should not be inherited by newly created snapshots. |
| # |
| # Possible values: |
| # |
| # * A list whose item is an image property. Usually only the image |
| # properties that are only needed by base images can be included |
| # here, since the snapshots that are created from the base images |
| # doesn't need them. |
| # * Default list: ['cache_in_nova', 'bittorrent'] |
| # (list value) |
| #non_inheritable_image_properties=cache_in_nova,bittorrent |
| |
| # DEPRECATED: |
| # This option is used to decide when an image should have no external |
| # ramdisk or kernel. By default this is set to 'nokernel', so when an |
| # image is booted with the property 'kernel_id' with the value |
| # 'nokernel', Nova assumes the image doesn't require an external kernel |
| # and ramdisk. |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # When an image is booted with the property 'kernel_id' with the value |
| # 'nokernel', Nova assumes the image doesn't require an external kernel and |
| # ramdisk. This option allows user to change the API behaviour which should not |
| # be allowed and this value "nokernel" should be hard coded. |
| #null_kernel=nokernel |
| |
| # DEPRECATED: |
| # When creating multiple instances with a single request using the |
| # os-multiple-create API extension, this template will be used to build |
| # the display name for each instance. The benefit is that the instances |
| # end up with different hostnames. Example display names when creating |
| # two VM's: name-1, name-2. |
| # |
| # Possible values: |
| # |
| # * Valid keys for the template are: name, uuid, count. |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # This config changes API behaviour. All changes in API behaviour should be |
| # discoverable. |
| #multi_instance_display_name_template=%(name)s-%(count)d |
| |
| # |
| # Maximum number of devices that will result in a local image being |
| # created on the hypervisor node. |
| # |
| # A negative number means unlimited. Setting max_local_block_devices |
| # to 0 means that any request that attempts to create a local disk |
| # will fail. This option is meant to limit the number of local discs |
| # (so root local disc that is the result of --image being used, and |
| # any other ephemeral and swap disks). 0 does not mean that images |
| # will be automatically converted to volumes and boot instances from |
| # volumes - it just means that all requests that attempt to create a |
| # local disk will fail. |
| # |
| # Possible values: |
| # |
| # * 0: Creating a local disk is not allowed. |
| # * Negative number: Allows unlimited number of local discs. |
| # * Positive number: Allows only these many number of local discs. |
| # (Default value is 3). |
| # (integer value) |
| #max_local_block_devices=3 |
| |
| # |
| # A list of monitors that can be used for getting compute metrics. |
| # You can use the alias/name from the setuptools entry points for |
| # nova.compute.monitors.* namespaces. If no namespace is supplied, |
| # the "cpu." namespace is assumed for backwards-compatibility. |
| # |
| # Possible values: |
| # |
# * An empty list will disable the feature (Default).
| # * An example value that would enable both the CPU and NUMA memory |
| # bandwidth monitors that used the virt driver variant: |
| # ["cpu.virt_driver", "numa_mem_bw.virt_driver"] |
| # (list value) |
| #compute_monitors = |
| |
| # |
| # The default format an ephemeral_volume will be formatted with on creation. |
| # |
| # Possible values: |
| # |
| # * ``ext2`` |
| # * ``ext3`` |
| # * ``ext4`` |
| # * ``xfs`` |
| # * ``ntfs`` (only for Windows guests) |
| # (string value) |
| #default_ephemeral_format=<None> |
| |
| # |
| # Determine if instance should boot or fail on VIF plugging timeout. |
| # |
| # Nova sends a port update to Neutron after an instance has been scheduled, |
| # providing Neutron with the necessary information to finish setup of the port. |
| # Once completed, Neutron notifies Nova that it has finished setting up the |
| # port, at which point Nova resumes the boot of the instance since network |
| # connectivity is now supposed to be present. A timeout will occur if the reply |
| # is not received after a given interval. |
| # |
| # This option determines what Nova does when the VIF plugging timeout event |
| # happens. When enabled, the instance will error out. When disabled, the |
| # instance will continue to boot on the assumption that the port is ready. |
| # |
| # Possible values: |
| # |
| # * True: Instances should fail after VIF plugging timeout |
| # * False: Instances should continue booting after VIF plugging timeout |
| # (boolean value) |
| #vif_plugging_is_fatal=true |
| vif_plugging_is_fatal = {{ controller.get('vif_plugging_is_fatal', 'false') }} |
| |
| # |
| # Timeout for Neutron VIF plugging event message arrival. |
| # |
| # Number of seconds to wait for Neutron vif plugging events to |
| # arrive before continuing or failing (see 'vif_plugging_is_fatal'). |
| # |
| # Related options: |
| # |
| # * vif_plugging_is_fatal - If ``vif_plugging_timeout`` is set to zero and |
| # ``vif_plugging_is_fatal`` is False, events should not be expected to |
| # arrive at all. |
| # (integer value) |
| # Minimum value: 0 |
| #vif_plugging_timeout=300 |
| vif_plugging_timeout = {{ controller.get('vif_plugging_timeout', '300') }} |
| |
| # Path to '/etc/network/interfaces' template. |
| # |
| # The path to a template file for the '/etc/network/interfaces'-style file, |
| # which |
| # will be populated by nova and subsequently used by cloudinit. This provides a |
| # method to configure network connectivity in environments without a DHCP |
| # server. |
| # |
| # The template will be rendered using Jinja2 template engine, and receive a |
| # top-level key called ``interfaces``. This key will contain a list of |
| # dictionaries, one for each interface. |
| # |
# Refer to the cloudinit documentation for more information:
| # |
| # https://cloudinit.readthedocs.io/en/latest/topics/datasources.html |
| # |
| # Possible values: |
| # |
| # * A path to a Jinja2-formatted template for a Debian '/etc/network/interfaces' |
| # file. This applies even if using a non Debian-derived guest. |
| # |
| # Related options: |
| # |
| # * ``flat_inject``: This must be set to ``True`` to ensure nova embeds network |
| # configuration information in the metadata provided through the config drive. |
| # (string value) |
| #injected_network_template=$pybasedir/nova/virt/interfaces.template |
| injected_network_template=$pybasedir/nova/virt/interfaces.template |
| |
| # |
| # The image preallocation mode to use. |
| # |
| # Image preallocation allows storage for instance images to be allocated up |
| # front |
| # when the instance is initially provisioned. This ensures immediate feedback is |
| # given if enough space isn't available. In addition, it should significantly |
| # improve performance on writes to new blocks and may even improve I/O |
| # performance to prewritten blocks due to reduced fragmentation. |
| # |
| # Possible values: |
| # |
| # * "none" => no storage provisioning is done up front |
| # * "space" => storage is fully allocated at instance start |
| # (string value) |
| # Allowed values: none, space |
| #preallocate_images=none |
| |
| # |
| # Enable use of copy-on-write (cow) images. |
| # |
| # QEMU/KVM allow the use of qcow2 as backing files. By disabling this, |
| # backing files will not be used. |
| # (boolean value) |
| #use_cow_images=true |
| |
| # |
| # Force conversion of backing images to raw format. |
| # |
| # Possible values: |
| # |
| # * True: Backing image files will be converted to raw image format |
| # * False: Backing image files will not be converted |
| # |
| # Related options: |
| # |
| # * ``compute_driver``: Only the libvirt driver uses this option. |
| # (boolean value) |
| #force_raw_images=true |
| |
| # |
| # Name of the mkfs commands for ephemeral device. |
| # |
| # The format is <os_type>=<mkfs command> |
| # (multi valued) |
| #virt_mkfs = |
| |
| # |
| # Enable resizing of filesystems via a block device. |
| # |
| # If enabled, attempt to resize the filesystem by accessing the image over a |
| # block device. This is done by the host and may not be necessary if the image |
| # contains a recent version of cloud-init. Possible mechanisms require the nbd |
| # driver (for qcow and raw), or loop (for raw). |
| # (boolean value) |
| #resize_fs_using_block_device=false |
| |
| # Amount of time, in seconds, to wait for NBD device start up. (integer value) |
| # Minimum value: 0 |
| #timeout_nbd=10 |
| {%- if controller.timeout_nbd is defined %} |
| timeout_nbd = {{ controller.timeout_nbd }} |
| {%- endif %} |
| |
| # |
| # Location of cached images. |
| # |
| # This is NOT the full path - just a folder name relative to '$instances_path'. |
| # For per-compute-host cached images, set to '_base_$my_ip' |
| # (string value) |
| #image_cache_subdirectory_name=_base |
| |
| # Should unused base images be removed? (boolean value) |
| #remove_unused_base_images=true |
| |
| # |
| # Unused unresized base images younger than this will not be removed. |
| # (integer value) |
| #remove_unused_original_minimum_age_seconds=86400 |
| |
| # |
| # Generic property to specify the pointer type. |
| # |
| # Input devices allow interaction with a graphical framebuffer. For |
| # example to provide a graphic tablet for absolute cursor movement. |
| # |
| # If set, the 'hw_pointer_model' image property takes precedence over |
| # this configuration option. |
| # |
| # Possible values: |
| # |
| # * None: Uses default behavior provided by drivers (mouse on PS2 for |
| # libvirt x86) |
| # * ps2mouse: Uses relative movement. Mouse connected by PS2 |
| # * usbtablet: Uses absolute movement. Tablet connect by USB |
| # |
| # Related options: |
| # |
| # * usbtablet must be configured with VNC enabled or SPICE enabled and SPICE |
| # agent disabled. When used with libvirt the instance mode should be |
| # configured as HVM. |
| # (string value) |
| # Allowed values: <None>, ps2mouse, usbtablet |
| #pointer_model=usbtablet |
| |
| # |
| # Defines which physical CPUs (pCPUs) can be used by instance |
| # virtual CPUs (vCPUs). |
| # |
| # Possible values: |
| # |
| # * A comma-separated list of physical CPU numbers that virtual CPUs can be |
| # allocated to by default. Each element should be either a single CPU number, |
| # a range of CPU numbers, or a caret followed by a CPU number to be |
| # excluded from a previous range. For example: |
| # |
| # vcpu_pin_set = "4-12,^8,15" |
| # (string value) |
| #vcpu_pin_set=<None> |
| |
| # |
| # Number of huge/large memory pages to reserved per NUMA host cell. |
| # |
| # Possible values: |
| # |
| # * A list of valid key=value which reflect NUMA node ID, page size |
| # (Default unit is KiB) and number of pages to be reserved. |
| # |
| # reserved_huge_pages = node:0,size:2048,count:64 |
| # reserved_huge_pages = node:1,size:1GB,count:1 |
| # |
| # In this example we are reserving on NUMA node 0 64 pages of 2MiB |
| # and on NUMA node 1 1 page of 1GiB. |
| # (dict value) |
| #reserved_huge_pages=<None> |
| |
| # |
| # Amount of disk resources in MB to make them always available to host. The |
| # disk usage gets reported back to the scheduler from nova-compute running |
| # on the compute nodes. To prevent the disk resources from being considered |
| # as available, this option can be used to reserve disk space for that host. |
| # |
| # Possible values: |
| # |
| # * Any positive integer representing amount of disk in MB to reserve |
| # for the host. |
| # (integer value) |
| # Minimum value: 0 |
| #reserved_host_disk_mb=0 |
| |
| # |
| # Amount of memory in MB to reserve for the host so that it is always available |
| # to host processes. The host resources usage is reported back to the scheduler |
| # continuously from nova-compute running on the compute node. To prevent the |
| # host |
| # memory from being considered as available, this option is used to reserve |
| # memory for the host. |
| # |
| # Possible values: |
| # |
| # * Any positive integer representing amount of memory in MB to reserve |
| # for the host. |
| # (integer value) |
| # Minimum value: 0 |
| #reserved_host_memory_mb=512 |
| |
| # |
| # This option helps you specify virtual CPU to physical CPU allocation ratio. |
| # |
| # From Ocata (15.0.0) this is used to influence the hosts selected by |
| # the Placement API. Note that when Placement is used, the CoreFilter |
| # is redundant, because the Placement API will have already filtered |
| # out hosts that would have failed the CoreFilter. |
| # |
| # This configuration specifies ratio for CoreFilter which can be set |
| # per compute node. For AggregateCoreFilter, it will fall back to this |
| # configuration value if no per-aggregate setting is found. |
| # |
| # NOTE: This can be set per-compute, or if set to 0.0, the value |
| # set on the scheduler node(s) or compute node(s) will be used |
# and defaulted to 16.0.
| # |
| # Possible values: |
| # |
| # * Any valid positive integer or float value |
| # (floating point value) |
| # Minimum value: 0 |
| #cpu_allocation_ratio=0.0 |
| cpu_allocation_ratio={{ controller.cpu_allocation_ratio }} |
| |
| # |
| # This option helps you specify virtual RAM to physical RAM |
| # allocation ratio. |
| # |
| # From Ocata (15.0.0) this is used to influence the hosts selected by |
| # the Placement API. Note that when Placement is used, the RamFilter |
| # is redundant, because the Placement API will have already filtered |
| # out hosts that would have failed the RamFilter. |
| # |
| # This configuration specifies ratio for RamFilter which can be set |
| # per compute node. For AggregateRamFilter, it will fall back to this |
| # configuration value if no per-aggregate setting found. |
| # |
| # NOTE: This can be set per-compute, or if set to 0.0, the value |
| # set on the scheduler node(s) or compute node(s) will be used and |
| # defaulted to 1.5. |
| # |
| # Possible values: |
| # |
| # * Any valid positive integer or float value |
| # (floating point value) |
| # Minimum value: 0 |
| #ram_allocation_ratio=0.0 |
| ram_allocation_ratio = {{ controller.ram_allocation_ratio }} |
| |
| # |
| # This option helps you specify virtual disk to physical disk |
| # allocation ratio. |
| # |
| # From Ocata (15.0.0) this is used to influence the hosts selected by |
| # the Placement API. Note that when Placement is used, the DiskFilter |
| # is redundant, because the Placement API will have already filtered |
| # out hosts that would have failed the DiskFilter. |
| # |
| # A ratio greater than 1.0 will result in over-subscription of the |
| # available physical disk, which can be useful for more |
| # efficiently packing instances created with images that do not |
| # use the entire virtual disk, such as sparse or compressed |
| # images. It can be set to a value between 0.0 and 1.0 in order |
| # to preserve a percentage of the disk for uses other than |
| # instances. |
| # |
| # NOTE: This can be set per-compute, or if set to 0.0, the value |
| # set on the scheduler node(s) or compute node(s) will be used and |
# defaulted to 1.0.
| # |
| # Possible values: |
| # |
| # * Any valid positive integer or float value |
| # (floating point value) |
| # Minimum value: 0 |
| #disk_allocation_ratio=0.0 |
| disk_allocation_ratio = {{ controller.disk_allocation_ratio }} |
| |
| # |
| # Console proxy host to be used to connect to instances on this host. It is the |
| # publicly visible name for the console host. |
| # |
| # Possible values: |
| # |
| # * Current hostname (default) or any string representing hostname. |
| # (string value) |
| #console_host=socket.gethostname() |
| |
| # |
| # Name of the network to be used to set access IPs for instances. If there are |
| # multiple IPs to choose from, an arbitrary one will be chosen. |
| # |
| # Possible values: |
| # |
| # * None (default) |
| # * Any string representing network name. |
| # (string value) |
| #default_access_ip_network_name=<None> |
| |
| # |
| # Whether to batch up the application of IPTables rules during a host restart |
| # and apply all at the end of the init phase. |
| # (boolean value) |
| #defer_iptables_apply=false |
| |
| # |
| # Specifies where instances are stored on the hypervisor's disk. |
| # It can point to locally attached storage or a directory on NFS. |
| # |
| # Possible values: |
| # |
| # * $state_path/instances where state_path is a config option that specifies |
| # the top-level directory for maintaining nova's state. (default) or |
| # Any string representing directory path. |
| # (string value) |
| #instances_path=$state_path/instances |
| |
| # |
| # This option enables periodic compute.instance.exists notifications. Each |
| # compute node must be configured to generate system usage data. These |
| # notifications are consumed by OpenStack Telemetry service. |
| # (boolean value) |
| #instance_usage_audit=false |
{%- if controller.instance_usage_audit is defined %}
| instance_usage_audit = {{ controller.instance_usage_audit }} |
| {%- endif %} |
| |
| # |
| # Maximum number of 1 second retries in live_migration. It specifies number |
# of retries to iptables when it complains. It happens when a user continuously
| # sends live-migration request to same host leading to concurrent request |
| # to iptables. |
| # |
| # Possible values: |
| # |
| # * Any positive integer representing retry count. |
| # (integer value) |
| # Minimum value: 0 |
| #live_migration_retry_count=30 |
| |
| # |
| # Number of times to retry network allocation. It is required to attempt network |
| # allocation retries if the virtual interface plug fails. |
| # |
| # Possible values: |
| # |
| # * Any positive integer representing retry count. |
| # (integer value) |
| # Minimum value: 0 |
| #network_allocate_retries=0 |
| |
| # |
| # Limits the maximum number of instance builds to run concurrently by |
| # nova-compute. Compute service can attempt to build an infinite number of |
| # instances, if asked to do so. This limit is enforced to avoid building |
| # unlimited instance concurrently on a compute node. This value can be set |
| # per compute node. |
| # |
| # Possible Values: |
| # |
| # * 0 : treated as unlimited. |
| # * Any positive integer representing maximum concurrent builds. |
| # (integer value) |
| # Minimum value: 0 |
| #max_concurrent_builds=10 |
| |
| # |
| # Maximum number of live migrations to run concurrently. This limit is enforced |
| # to avoid outbound live migrations overwhelming the host/network and causing |
| # failures. It is not recommended that you change this unless you are very sure |
| # that doing so is safe and stable in your environment. |
| # |
| # Possible values: |
| # |
| # * 0 : treated as unlimited. |
| # * Negative value defaults to 0. |
| # * Any positive integer representing maximum number of live migrations |
| # to run concurrently. |
| # (integer value) |
| #max_concurrent_live_migrations=1 |
| |
| # |
| # Number of times to retry block device allocation on failures. Starting with |
| # Liberty, Cinder can use image volume cache. This may help with block device |
| # allocation performance. Look at the cinder image_volume_cache_enabled |
| # configuration option. |
| # |
| # Possible values: |
| # |
| # * 60 (default) |
| # * If value is 0, then one attempt is made. |
| # * Any negative value is treated as 0. |
| # * For any value > 0, total attempts are (value + 1) |
| # (integer value) |
| #block_device_allocate_retries=60 |
| block_device_allocate_retries={{ controller.get('block_device_allocate_retries', '600') }} |
| |
| # |
| # Number of greenthreads available for use to sync power states. |
| # |
| # This option can be used to reduce the number of concurrent requests |
| # made to the hypervisor or system with real instance power states |
| # for performance reasons, for example, with Ironic. |
| # |
| # Possible values: |
| # |
| # * Any positive integer representing greenthreads count. |
| # (integer value) |
| #sync_power_state_pool_size=1000 |
| |
| # |
| # Number of seconds to wait between runs of the image cache manager. |
| # |
| # Possible values: |
| # * 0: run at the default rate. |
| # * -1: disable |
| # * Any other value |
| # (integer value) |
| # Minimum value: -1 |
| #image_cache_manager_interval=2400 |
| |
| # |
| # Interval to pull network bandwidth usage info. |
| # |
| # Not supported on all hypervisors. If a hypervisor doesn't support bandwidth |
| # usage, it will not get the info in the usage events. |
| # |
| # Possible values: |
| # |
| # * 0: Will run at the default periodic interval. |
| # * Any value < 0: Disables the option. |
| # * Any positive integer in seconds. |
| # (integer value) |
| #bandwidth_poll_interval=600 |
| |
| # |
| # Interval to sync power states between the database and the hypervisor. |
| # |
| # The interval that Nova checks the actual virtual machine power state |
| # and the power state that Nova has in its database. If a user powers |
| # down their VM, Nova updates the API to report the VM has been |
| # powered down. Should something turn on the VM unexpectedly, |
| # Nova will turn the VM back off to keep the system in the expected |
| # state. |
| # |
| # Possible values: |
| # |
| # * 0: Will run at the default periodic interval. |
| # * Any value < 0: Disables the option. |
| # * Any positive integer in seconds. |
| # |
| # Related options: |
| # |
| # * If ``handle_virt_lifecycle_events`` in workarounds_group is |
| # false and this option is negative, then instances that get out |
| # of sync between the hypervisor and the Nova database will have |
| # to be synchronized manually. |
| # (integer value) |
| #sync_power_state_interval=600 |
| |
| # |
| # Interval between instance network information cache updates. |
| # |
| # Number of seconds after which each compute node runs the task of |
| # querying Neutron for all of its instances networking information, |
| # then updates the Nova db with that information. Nova will never |
# update its cache if this option is set to 0. If we don't update the
| # cache, the metadata service and nova-api endpoints will be proxying |
| # incorrect network data about the instance. So, it is not recommended |
| # to set this option to 0. |
| # |
| # Possible values: |
| # |
| # * Any positive integer in seconds. |
| # * Any value <=0 will disable the sync. This is not recommended. |
| # (integer value) |
| #heal_instance_info_cache_interval=60 |
| {%- if controller.heal_instance_info_cache_interval is defined %} |
| heal_instance_info_cache_interval = {{ controller.heal_instance_info_cache_interval }} |
| {%- endif %} |
| |
| |
| # |
| # Interval for reclaiming deleted instances. |
| # |
| # A value greater than 0 will enable SOFT_DELETE of instances. |
| # This option decides whether the server to be deleted will be put into |
| # the SOFT_DELETED state. If this value is greater than 0, the deleted |
| # server will not be deleted immediately, instead it will be put into |
| # a queue until it's too old (deleted time greater than the value of |
| # reclaim_instance_interval). The server can be recovered from the |
| # delete queue by using the restore action. If the deleted server remains |
| # longer than the value of reclaim_instance_interval, it will be |
| # deleted by a periodic task in the compute service automatically. |
| # |
| # Note that this option is read from both the API and compute nodes, and |
| # must be set globally otherwise servers could be put into a soft deleted |
| # state in the API and never actually reclaimed (deleted) on the compute |
| # node. |
| # |
| # Possible values: |
| # |
| # * Any positive integer(in seconds) greater than 0 will enable |
| # this option. |
| # * Any value <=0 will disable the option. |
| # (integer value) |
| #reclaim_instance_interval=0 |
| {%- if controller.reclaim_instance_interval is defined %} |
| reclaim_instance_interval = {{ controller.reclaim_instance_interval }} |
| {%- endif %} |
| |
| # |
| # Interval for gathering volume usages. |
| # |
| # This option updates the volume usage cache for every |
| # volume_usage_poll_interval number of seconds. |
| # |
| # Possible values: |
| # |
| # * Any positive integer(in seconds) greater than 0 will enable |
| # this option. |
| # * Any value <=0 will disable the option. |
| # (integer value) |
| #volume_usage_poll_interval=0 |
| |
| # |
| # Interval for polling shelved instances to offload. |
| # |
| # The periodic task runs for every shelved_poll_interval number |
| # of seconds and checks if there are any shelved instances. If it |
| # finds a shelved instance, based on the 'shelved_offload_time' config |
| # value it offloads the shelved instances. Check 'shelved_offload_time' |
| # config option description for details. |
| # |
| # Possible values: |
| # |
| # * Any value <= 0: Disables the option. |
| # * Any positive integer in seconds. |
| # |
| # Related options: |
| # |
| # * ``shelved_offload_time`` |
| # (integer value) |
| #shelved_poll_interval=3600 |
| |
| # |
| # Time before a shelved instance is eligible for removal from a host. |
| # |
| # By default this option is set to 0 and the shelved instance will be |
| # removed from the hypervisor immediately after shelve operation. |
| # Otherwise, the instance will be kept for the value of |
| # shelved_offload_time(in seconds) so that during the time period the |
| # unshelve action will be faster, then the periodic task will remove |
| # the instance from hypervisor after shelved_offload_time passes. |
| # |
| # Possible values: |
| # |
| # * 0: Instance will be immediately offloaded after being |
| # shelved. |
| # * Any value < 0: An instance will never offload. |
| # * Any positive integer in seconds: The instance will exist for |
| # the specified number of seconds before being offloaded. |
| # (integer value) |
| #shelved_offload_time=0 |
| |
| # |
| # Interval for retrying failed instance file deletes. |
| # |
| # This option depends on 'maximum_instance_delete_attempts'. |
| # This option specifies how often to retry deletes whereas |
| # 'maximum_instance_delete_attempts' specifies the maximum number |
| # of retry attempts that can be made. |
| # |
| # Possible values: |
| # |
| # * 0: Will run at the default periodic interval. |
| # * Any value < 0: Disables the option. |
| # * Any positive integer in seconds. |
| # |
| # Related options: |
| # |
| # * ``maximum_instance_delete_attempts`` from instance_cleaning_opts |
| # group. |
| # (integer value) |
| #instance_delete_interval=300 |
| |
| # |
| # Interval (in seconds) between block device allocation retries on failures. |
| # |
| # This option allows the user to specify the time interval between |
| # consecutive retries. 'block_device_allocate_retries' option specifies |
| # the maximum number of retries. |
| # |
| # Possible values: |
| # |
| # * 0: Disables the option. |
| # * Any positive integer in seconds enables the option. |
| # |
| # Related options: |
| # |
| # * ``block_device_allocate_retries`` in compute_manager_opts group. |
| # (integer value) |
| # Minimum value: 0 |
| #block_device_allocate_retries_interval=3 |
| block_device_allocate_retries_interval={{ controller.get('block_device_allocate_retries_interval', '10') }} |
| |
| # |
| # Interval between sending the scheduler a list of current instance UUIDs to |
| # verify that its view of instances is in sync with nova. |
| # |
| # If the CONF option 'scheduler_tracks_instance_changes' is |
| # False, the sync calls will not be made. So, changing this option will |
| # have no effect. |
| # |
| # If the out of sync situations are not very common, this interval |
| # can be increased to lower the number of RPC messages being sent. |
| # Likewise, if sync issues turn out to be a problem, the interval |
| # can be lowered to check more frequently. |
| # |
| # Possible values: |
| # |
| # * 0: Will run at the default periodic interval. |
| # * Any value < 0: Disables the option. |
| # * Any positive integer in seconds. |
| # |
| # Related options: |
| # |
| # * This option has no impact if ``scheduler_tracks_instance_changes`` |
| # is set to False. |
| # (integer value) |
| #scheduler_instance_sync_interval=120 |
| |
| # |
| # Interval for updating compute resources. |
| # |
| # This option specifies how often the update_available_resources |
| # periodic task should run. A number less than 0 means to disable the |
| # task completely. Leaving this at the default of 0 will cause this to |
| # run at the default periodic interval. Setting it to any positive |
| # value will cause it to run at approximately that number of seconds. |
| # |
| # Possible values: |
| # |
| # * 0: Will run at the default periodic interval. |
| # * Any value < 0: Disables the option. |
| # * Any positive integer in seconds. |
| # (integer value) |
| #update_resources_interval=0 |
| |
| # |
| # Time interval after which an instance is hard rebooted automatically. |
| # |
| # When doing a soft reboot, it is possible that a guest kernel is |
| # completely hung in a way that causes the soft reboot task |
| # to not ever finish. Setting this option to a time period in seconds |
| # will automatically hard reboot an instance if it has been stuck |
| # in a rebooting state longer than N seconds. |
| # |
| # Possible values: |
| # |
| # * 0: Disables the option (default). |
| # * Any positive integer in seconds: Enables the option. |
| # (integer value) |
| # Minimum value: 0 |
| #reboot_timeout=0 |
| |
| # |
| # Maximum time in seconds that an instance can take to build. |
| # |
| # If this timer expires, instance status will be changed to ERROR. |
| # Enabling this option will make sure an instance will not be stuck |
| # in BUILD state for a longer period. |
| # |
| # Possible values: |
| # |
| # * 0: Disables the option (default) |
| # * Any positive integer in seconds: Enables the option. |
| # (integer value) |
| # Minimum value: 0 |
| {%- if controller.instance_build_timeout is defined %} |
| instance_build_timeout = {{ controller.instance_build_timeout }} |
| {%- else %} |
| #instance_build_timeout = 0 |
| {%- endif %} |
| |
| # |
| # Interval to wait before un-rescuing an instance stuck in RESCUE. |
| # |
| # Possible values: |
| # |
| # * 0: Disables the option (default) |
| # * Any positive integer in seconds: Enables the option. |
| # (integer value) |
| # Minimum value: 0 |
| #rescue_timeout=0 |
| |
| # |
| # Automatically confirm resizes after N seconds. |
| # |
| # Resize functionality will save the existing server before resizing. |
| # After the resize completes, user is requested to confirm the resize. |
| # The user has the opportunity to either confirm or revert all |
| # changes. Confirm resize removes the original server and changes |
| # server status from resized to active. Setting this option to a time |
| # period (in seconds) will automatically confirm the resize if the |
| # server is in resized state longer than that time. |
| # |
| # Possible values: |
| # |
| # * 0: Disables the option (default) |
| # * Any positive integer in seconds: Enables the option. |
| # (integer value) |
| # Minimum value: 0 |
| #resize_confirm_window=0 |
| |
| # |
# Total time to wait in seconds for an instance to perform a clean
| # shutdown. |
| # |
| # It determines the overall period (in seconds) a VM is allowed to |
| # perform a clean shutdown. While performing stop, rescue and shelve, |
| # rebuild operations, configuring this option gives the VM a chance |
| # to perform a controlled shutdown before the instance is powered off. |
| # The default timeout is 60 seconds. |
| # |
| # The timeout value can be overridden on a per image basis by means |
| # of os_shutdown_timeout that is an image metadata setting allowing |
| # different types of operating systems to specify how much time they |
| # need to shut down cleanly. |
| # |
| # Possible values: |
| # |
| # * Any positive integer in seconds (default value is 60). |
| # (integer value) |
| # Minimum value: 1 |
| #shutdown_timeout=60 |
| |
| # |
| # The compute service periodically checks for instances that have been |
| # deleted in the database but remain running on the compute node. The |
| # above option enables action to be taken when such instances are |
| # identified. |
| # |
| # Possible values: |
| # |
| # * reap: Powers down the instances and deletes them(default) |
| # * log: Logs warning message about deletion of the resource |
| # * shutdown: Powers down instances and marks them as non- |
| # bootable which can be later used for debugging/analysis |
| # * noop: Takes no action |
| # |
| # Related options: |
| # |
# * running_deleted_instance_poll_interval
| # * running_deleted_instance_timeout |
| # (string value) |
| # Allowed values: noop, log, shutdown, reap |
| #running_deleted_instance_action=reap |
| |
| # |
| # Time interval in seconds to wait between runs for the clean up action. |
# If set to 0, above check will be disabled. If
# "running_deleted_instance_action" is set to "log" or "reap", a value
# greater than 0 must be set.
| # |
| # Possible values: |
| # |
| # * Any positive integer in seconds enables the option. |
| # * 0: Disables the option. |
| # * 1800: Default value. |
| # |
| # Related options: |
| # |
| # * running_deleted_instance_action |
| # (integer value) |
| #running_deleted_instance_poll_interval=1800 |
| |
| # |
| # Time interval in seconds to wait for the instances that have |
| # been marked as deleted in database to be eligible for cleanup. |
| # |
| # Possible values: |
| # |
| # * Any positive integer in seconds(default is 0). |
| # |
| # Related options: |
| # |
| # * "running_deleted_instance_action" |
| # (integer value) |
| #running_deleted_instance_timeout=0 |
| |
| # |
| # The number of times to attempt to reap an instance's files. |
| # |
| # This option specifies the maximum number of retry attempts |
| # that can be made. |
| # |
| # Possible values: |
| # |
| # * Any positive integer defines how many attempts are made. |
| # * Any value <=0 means no delete attempts occur, but you should use |
| # ``instance_delete_interval`` to disable the delete attempts. |
| # |
| # Related options: |
| # * ``instance_delete_interval`` in interval_opts group can be used to disable |
| # this option. |
| # (integer value) |
| #maximum_instance_delete_attempts=5 |
| |
| # DEPRECATED: |
| # This is the message queue topic that the compute service 'listens' on. It is |
| # used when the compute service is started up to configure the queue, and |
| # whenever an RPC call to the compute service is made. |
| # |
| # Possible values: |
| # |
| # * Any string, but there is almost never any reason to ever change this value |
| # from its default of 'compute'. |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # There is no need to let users choose the RPC topic for all services - there |
| # is little gain from this. Furthermore, it makes it really easy to break Nova |
| # by using this option. |
| #compute_topic=compute |
| |
| # |
| # Sets the scope of the check for unique instance names. |
| # |
| # The default doesn't check for unique names. If a scope for the name check is |
| # set, a launch of a new instance or an update of an existing instance with a |
| # duplicate name will result in an ''InstanceExists'' error. The uniqueness is |
| # case-insensitive. Setting this option can increase the usability for end |
| # users as they don't have to distinguish among instances with the same name |
| # by their IDs. |
| # |
| # Possible values: |
| # |
| # * '': An empty value means that no uniqueness check is done and duplicate |
| # names are possible. |
| # * "project": The instance name check is done only for instances within the |
| # same project. |
| # * "global": The instance name check is done for all instances regardless of |
| # the project. |
| # (string value) |
| # Allowed values: '', project, global |
| #osapi_compute_unique_server_name_scope = |
| |
| # |
| # Enable new services on this host automatically. |
| # |
| # When a new service (for example "nova-compute") starts up, it gets |
| # registered in the database as an enabled service. Sometimes it can be useful |
| # to register new services in disabled state and then enabled them at a later |
| # point in time. This option can set this behavior for all services per host. |
| # |
| # Possible values: |
| # |
| # * ``True``: Each new service is enabled as soon as it registers itself. |
| # * ``False``: Services must be enabled via a REST API call or with the CLI |
| # with ``nova service-enable <hostname> <binary>``, otherwise they are not |
| # ready to use. |
| # (boolean value) |
| #enable_new_services=true |
| |
| # |
| # Template string to be used to generate instance names. |
| # |
| # This template controls the creation of the database name of an instance. This |
| # is *not* the display name you enter when creating an instance (via Horizon |
| # or CLI). For a new deployment it is advisable to change the default value |
| # (which uses the database autoincrement) to another value which makes use |
| # of the attributes of an instance, like ``instance-%(uuid)s``. If you |
| # already have instances in your deployment when you change this, your |
| # deployment will break. |
| # |
| # Possible values: |
| # |
| # * A string which either uses the instance database ID (like the |
| # default) |
| # * A string with a list of named database columns, for example ``%(id)d`` |
| # or ``%(uuid)s`` or ``%(hostname)s``. |
| # |
| # Related options: |
| # |
| # * not to be confused with: ``multi_instance_display_name_template`` |
| # (string value) |
| #instance_name_template=instance-%08x |
| |
| # |
| # Number of times to retry live-migration before failing. |
| # |
| # Possible values: |
| # |
| # * If == -1, try until out of hosts (default) |
| # * If == 0, only try once, no retries |
| # * Integer greater than 0 |
| # (integer value) |
| # Minimum value: -1 |
| #migrate_max_retries=-1 |
| |
| # |
| # Configuration drive format |
| # |
| # Configuration drive format that will contain metadata attached to the |
| # instance when it boots. |
| # |
| # Possible values: |
| # |
| # * iso9660: A file system image standard that is widely supported across |
| # operating systems. NOTE: Mind the libvirt bug |
| # (https://bugs.launchpad.net/nova/+bug/1246201) - If your hypervisor |
| # driver is libvirt, and you want live migrate to work without shared storage, |
| # then use VFAT. |
| # * vfat: For legacy reasons, you can configure the configuration drive to |
| # use VFAT format instead of ISO 9660. |
| # |
| # Related options: |
| # |
| # * This option is meaningful when one of the following alternatives occur: |
| # 1. force_config_drive option set to 'true' |
| # 2. the REST API call to create the instance contains an enable flag for |
| # config drive option |
| # 3. the image used to create the instance requires a config drive, |
| # this is defined by img_config_drive property for that image. |
| # * A compute node running Hyper-V hypervisor can be configured to attach |
| # configuration drive as a CD drive. To attach the configuration drive as a CD |
| # drive, set config_drive_cdrom option at hyperv section, to true. |
| # (string value) |
| # Allowed values: iso9660, vfat |
| #config_drive_format=iso9660 |
| |
| # |
| # Force injection to take place on a config drive |
| # |
| # When this option is set to true configuration drive functionality will be |
| # forced enabled by default, otherwise user can still enable configuration |
| # drives via the REST API or image metadata properties. |
| # |
| # Possible values: |
| # |
| # * True: Force to use of configuration drive regardless the user's input in the |
| # REST API call. |
| # * False: Do not force use of configuration drive. Config drives can still be |
| # enabled via the REST API or image metadata properties. |
| # |
| # Related options: |
| # |
| # * Use the 'mkisofs_cmd' flag to set the path where you install the |
| # genisoimage program. If genisoimage is in same path as the |
| # nova-compute service, you do not need to set this flag. |
| # * To use configuration drive with Hyper-V, you must set the |
| # 'mkisofs_cmd' value to the full path to an mkisofs.exe installation. |
| # Additionally, you must set the qemu_img_cmd value in the hyperv |
| # configuration section to the full path to an qemu-img command |
| # installation. |
| # (boolean value) |
| #force_config_drive=false |
| |
| # |
| # Name or path of the tool used for ISO image creation |
| # |
| # Use the mkisofs_cmd flag to set the path where you install the genisoimage |
| # program. If genisoimage is on the system path, you do not need to change |
| # the default value. |
| # |
| # To use configuration drive with Hyper-V, you must set the mkisofs_cmd value |
| # to the full path to an mkisofs.exe installation. Additionally, you must set |
| # the qemu_img_cmd value in the hyperv configuration section to the full path |
| # to an qemu-img command installation. |
| # |
| # Possible values: |
| # |
| # * Name of the ISO image creator program, in case it is in the same directory |
| # as the nova-compute service |
| # * Path to ISO image creator program |
| # |
| # Related options: |
| # |
| # * This option is meaningful when config drives are enabled. |
| # * To use configuration drive with Hyper-V, you must set the qemu_img_cmd |
| # value in the hyperv configuration section to the full path to an qemu-img |
| # command installation. |
| # (string value) |
| #mkisofs_cmd=genisoimage |
| |
| # DEPRECATED: |
| # nova-console-proxy is used to set up multi-tenant VM console access. |
| # This option allows pluggable driver program for the console session |
| # and represents driver to use for the console proxy. |
| # |
| # Possible values: |
| # |
| # * A string representing fully classified class name of console driver. |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # This option no longer does anything. Previously this option had only two |
| # valid, |
| # in-tree values: nova.console.xvp.XVPConsoleProxy and |
| # nova.console.fake.FakeConsoleProxy. The latter of these was only used in tests |
| # and has since been replaced. |
| #console_driver=nova.console.xvp.XVPConsoleProxy |
| |
| # DEPRECATED: |
| # Represents the message queue topic name used by nova-console |
| # service when communicating via the AMQP server. The Nova API uses a message |
| # queue to communicate with nova-console to retrieve a console URL for that |
| # host. |
| # |
| # Possible values: |
| # |
| # * A string representing topic exchange name |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # There is no need to let users choose the RPC topic for all services - there |
| # is little gain from this. Furthermore, it makes it really easy to break Nova |
| # by using this option. |
| #console_topic=console |
| |
| # DEPRECATED: |
| # This option allows you to change the message topic used by nova-consoleauth |
| # service when communicating via the AMQP server. Nova Console Authentication |
| # server authenticates nova consoles. Users can then access their instances |
| # through VNC clients. The Nova API service uses a message queue to |
| # communicate with nova-consoleauth to get a VNC console. |
| # |
| # Possible Values: |
| # |
| # * 'consoleauth' (default) or Any string representing topic exchange name. |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # There is no need to let users choose the RPC topic for all services - there |
| # is little gain from this. Furthermore, it makes it really easy to break Nova |
| # by using this option. |
| #consoleauth_topic=consoleauth |
| |
| # DEPRECATED: The driver to use for database access (string value) |
| # This option is deprecated for removal since 13.0.0. |
| # Its value may be silently ignored in the future. |
| #db_driver=nova.db |
| |
| # DEPRECATED: |
| # Default flavor to use for the EC2 API only. |
| # The Nova API does not support a default flavor. |
| # (string value) |
| # This option is deprecated for removal since 14.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: The EC2 API is deprecated. |
| #default_flavor=m1.small |
| |
| # |
| # Default pool for floating IPs. |
| # |
| # This option specifies the default floating IP pool for allocating floating |
| # IPs. |
| # |
| # While allocating a floating ip, users can optionally pass in the name of the |
| # pool they want to allocate from, otherwise it will be pulled from the |
| # default pool. |
| # |
| # If this option is not set, then 'nova' is used as default floating pool. |
| # |
| # Possible values: |
| # |
| # * Any string representing a floating IP pool name |
| # (string value) |
| #default_floating_pool=nova |
| |
| # DEPRECATED: |
| # Autoassigning floating IP to VM |
| # |
| # When set to True, floating IP is auto allocated and associated |
| # to the VM upon creation. |
| # |
| # Related options: |
| # |
| # * use_neutron: this options only works with nova-network. |
| # (boolean value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #auto_assign_floating_ip=false |
| use_neutron = True |
| |
| # DEPRECATED: |
| # Full class name for the DNS Manager for floating IPs. |
| # |
| # This option specifies the class of the driver that provides functionality |
| # to manage DNS entries associated with floating IPs. |
| # |
| # When a user adds a DNS entry for a specified domain to a floating IP, |
| # nova will add a DNS entry using the specified floating DNS driver. |
| # When a floating IP is deallocated, its DNS entry will automatically be |
| # deleted. |
| # |
| # Possible values: |
| # |
| # * Full Python path to the class to be used |
| # |
| # Related options: |
| # |
| # * use_neutron: this options only works with nova-network. |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #floating_ip_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver |
| |
| # DEPRECATED: |
| # Full class name for the DNS Manager for instance IPs. |
| # |
| # This option specifies the class of the driver that provides functionality |
| # to manage DNS entries for instances. |
| # |
| # On instance creation, nova will add DNS entries for the instance name and |
| # id, using the specified instance DNS driver and domain. On instance deletion, |
| # nova will remove the DNS entries. |
| # |
| # Possible values: |
| # |
| # * Full Python path to the class to be used |
| # |
| # Related options: |
| # |
| # * use_neutron: this options only works with nova-network. |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #instance_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver |
| |
| # DEPRECATED: |
| # If specified, Nova checks if the availability_zone of every instance matches |
| # what the database says the availability_zone should be for the specified |
| # dns_domain. |
| # |
| # Related options: |
| # |
| # * use_neutron: this options only works with nova-network. |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #instance_dns_domain = |
| |
| # |
| # Abstracts out IPv6 address generation to pluggable backends. |
| # |
| # nova-network can be put into dual-stack mode, so that it uses |
| # both IPv4 and IPv6 addresses. In dual-stack mode, by default, instances |
| # acquire IPv6 global unicast addresses with the help of stateless address |
| # auto-configuration mechanism. |
| # |
| # Related options: |
| # |
| # * use_neutron: this option only works with nova-network. |
| # * use_ipv6: this option only works if ipv6 is enabled for nova-network. |
| # (string value) |
| # Allowed values: rfc2462, account_identifier |
| #ipv6_backend=rfc2462 |
| |
| # |
| # The IP address which the host is using to connect to the management network. |
| # |
| # Possible values: |
| # |
| # * String with valid IP address. Default is IPv4 address of this host. |
| # |
| # Related options: |
| # |
| # * metadata_host |
| # * my_block_storage_ip |
| # * routing_source_ip |
| # * vpn_ip |
| # (string value) |
| #my_ip=10.89.104.70 |
| my_ip={{ controller.bind.private_address }} |
| |
| # |
| # The IP address which is used to connect to the block storage network. |
| # |
| # Possible values: |
| # |
| # * String with valid IP address. Default is IP address of this host. |
| # |
| # Related options: |
| # |
| # * my_ip - if my_block_storage_ip is not set, then my_ip value is used. |
| # (string value) |
| #my_block_storage_ip=$my_ip |
| |
| # |
| # Hostname, FQDN or IP address of this host. Must be valid within AMQP key. |
| # |
| # Possible values: |
| # |
| # * String with hostname, FQDN or IP address. Default is hostname of this host. |
| # (string value) |
| #host=lcy01-22 |
| {%- if controller.host is defined %} |
| host={{ controller.host }} |
| {%- endif %} |
| |
| # |
| # Assign IPv6 and IPv4 addresses when creating instances. |
| # |
| # Related options: |
| # |
| # * use_neutron: this only works with nova-network. |
| # (boolean value) |
| #use_ipv6=false |
| |
| # |
| # This option is a list of full paths to one or more configuration files for |
| # dhcpbridge. In most cases the default path of '/etc/nova/nova-dhcpbridge.conf' |
| # should be sufficient, but if you have special needs for configuring |
| # dhcpbridge, |
| # you can change or add to this list. |
| # |
| # Possible values |
| # |
| # A list of strings, where each string is the full path to a dhcpbridge |
| # configuration file. |
| # (multi valued) |
| dhcpbridge_flagfile=/etc/nova/nova.conf |
| |
| # |
| # The location where the network configuration files will be kept. The default |
| # is |
| # the 'networks' directory off of the location where nova's Python module is |
| # installed. |
| # |
| # Possible values |
| # |
| # A string containing the full path to the desired configuration directory |
| # (string value) |
| #networks_path=$state_path/networks |
| |
| # |
| # This is the name of the network interface for public IP addresses. The default |
| # is 'eth0'. |
| # |
| # Possible values: |
| # |
| # Any string representing a network interface name |
| # (string value) |
| #public_interface=eth0 |
| |
| # |
| # The location of the binary nova-dhcpbridge. By default it is the binary named |
| # 'nova-dhcpbridge' that is installed with all the other nova binaries. |
| # |
| # Possible values: |
| # |
| # Any string representing the full path to the binary for dhcpbridge |
| # (string value) |
| dhcpbridge=/usr/bin/nova-dhcpbridge |
| |
| # |
| # This is the public IP address of the network host. It is used when creating a |
| # SNAT rule. |
| # |
| # Possible values: |
| # |
| # Any valid IP address |
| # |
| # Related options: |
| # |
| # force_snat_range |
| # (string value) |
| #routing_source_ip=$my_ip |
| |
| # |
| # The lifetime of a DHCP lease, in seconds. The default is 86400 (one day). |
| # |
| # Possible values: |
| # |
| # Any positive integer value. |
| # (integer value) |
| # Minimum value: 1 |
| #dhcp_lease_time=86400 |
| |
| # |
| # Despite the singular form of the name of this option, it is actually a list of |
| # zero or more server addresses that dnsmasq will use for DNS nameservers. If |
| # this is not empty, dnsmasq will not read /etc/resolv.conf, but will only use |
| # the servers specified in this option. If the option use_network_dns_servers is |
| # True, the dns1 and dns2 servers from the network will be appended to this |
| # list, |
| # and will be used as DNS servers, too. |
| # |
| # Possible values: |
| # |
| # A list of strings, where each string is either an IP address or a FQDN. |
| # |
| # Related options: |
| # |
| # use_network_dns_servers |
| # (multi valued) |
| #dns_server = |
| |
| # |
| # When this option is set to True, the dns1 and dns2 servers for the network |
| # specified by the user on boot will be used for DNS, as well as any specified |
| # in |
| # the `dns_server` option. |
| # |
| # Related options: |
| # |
| # dns_server |
| # (boolean value) |
| #use_network_dns_servers=false |
| |
| # |
| # This option is a list of zero or more IP address ranges in your network's DMZ |
| # that should be accepted. |
| # |
| # Possible values: |
| # |
| # A list of strings, each of which should be a valid CIDR. |
| # (list value) |
| #dmz_cidr = |
| |
| # |
| # This is a list of zero or more IP ranges that traffic from the |
| # `routing_source_ip` will be SNATted to. If the list is empty, then no SNAT |
| # rules are created. |
| # |
| # Possible values: |
| # |
| # A list of strings, each of which should be a valid CIDR. |
| # |
| # Related options: |
| # |
| # routing_source_ip |
| # (multi valued) |
| #force_snat_range = |
| |
| # |
| # The path to the custom dnsmasq configuration file, if any. |
| # |
| # Possible values: |
| # |
| # The full path to the configuration file, or an empty string if there is no |
| # custom dnsmasq configuration file. |
| # (string value) |
| #dnsmasq_config_file = |
| |
| # |
| # This is the class used as the ethernet device driver for linuxnet bridge |
| # operations. The default value should be all you need for most cases, but if |
| # you |
| # wish to use a customized class, set this option to the full dot-separated |
| # import path for that class. |
| # |
| # Possible values: |
| # |
| # Any string representing a dot-separated class path that Nova can import. |
| # (string value) |
| #linuxnet_interface_driver=nova.network.linux_net.LinuxBridgeInterfaceDriver |
| |
| # |
| # The name of the Open vSwitch bridge that is used with linuxnet when connecting |
# with Open vSwitch.
| # |
| # Possible values: |
| # |
| # Any string representing a valid bridge name. |
| # (string value) |
| #linuxnet_ovs_integration_bridge=br-int |
| |
| # |
| # When True, when a device starts up, and upon binding floating IP addresses, |
| # arp |
| # messages will be sent to ensure that the arp caches on the compute hosts are |
| # up-to-date. |
| # |
| # Related options: |
| # |
| # send_arp_for_ha_count |
| # (boolean value) |
| #send_arp_for_ha=false |
| |
| # |
| # When arp messages are configured to be sent, they will be sent with the count |
| # set to the value of this option. Of course, if this is set to zero, no arp |
| # messages will be sent. |
| # |
| # Possible values: |
| # |
| # Any integer greater than or equal to 0 |
| # |
| # Related options: |
| # |
| # send_arp_for_ha |
| # (integer value) |
| #send_arp_for_ha_count=3 |
| |
| # |
# When set to True, only the first nic of a VM will get its default gateway from
| # the DHCP server. |
| # (boolean value) |
| #use_single_default_gateway=false |
| |
| # |
| # One or more interfaces that bridges can forward traffic to. If any of the |
| # items |
| # in this list is the special keyword 'all', then all traffic will be forwarded. |
| # |
| # Possible values: |
| # |
| # A list of zero or more interface names, or the word 'all'. |
| # (multi valued) |
| #forward_bridge_interface=all |
| |
| # |
| # This option determines the IP address for the network metadata API server. |
| # |
| # Possible values: |
| # |
| # * Any valid IP address. The default is the address of the Nova API server. |
| # |
| # Related options: |
| # |
| # * metadata_port |
| # (string value) |
| #metadata_host=$my_ip |
| |
| # |
| # This option determines the port used for the metadata API server. |
| # |
| # Related options: |
| # |
| # * metadata_host |
| # (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| #metadata_port=8775 |
| |
| # |
| # This expression, if defined, will select any matching iptables rules and place |
| # them at the top when applying metadata changes to the rules. |
| # |
| # Possible values: |
| # |
| # * Any string representing a valid regular expression, or an empty string |
| # |
| # Related options: |
| # |
| # * iptables_bottom_regex |
| # (string value) |
| #iptables_top_regex = |
| |
| # |
| # This expression, if defined, will select any matching iptables rules and place |
| # them at the bottom when applying metadata changes to the rules. |
| # |
| # Possible values: |
| # |
| # * Any string representing a valid regular expression, or an empty string |
| # |
| # Related options: |
| # |
| # * iptables_top_regex |
| # (string value) |
| #iptables_bottom_regex = |
| |
| # |
| # By default, packets that do not pass the firewall are DROPped. In many cases, |
| # though, an operator may find it more useful to change this from DROP to |
| # REJECT, |
| # so that the user issuing those packets may have a better idea as to what's |
| # going on, or LOGDROP in order to record the blocked traffic before DROPping. |
| # |
| # Possible values: |
| # |
| # * A string representing an iptables chain. The default is DROP. |
| # (string value) |
| #iptables_drop_action=DROP |
| |
| # |
| # This option represents the period of time, in seconds, that the ovs_vsctl |
| # calls |
| # will wait for a response from the database before timing out. A setting of 0 |
| # means that the utility should wait forever for a response. |
| # |
| # Possible values: |
| # |
| # * Any positive integer if a limited timeout is desired, or zero if the |
| # calls should wait forever for a response. |
| # (integer value) |
| # Minimum value: 0 |
| #ovs_vsctl_timeout=120 |
| |
| # |
| # This option is used mainly in testing to avoid calls to the underlying network |
| # utilities. |
| # (boolean value) |
| #fake_network=false |
| |
| # |
| # This option determines the number of times to retry ebtables commands before |
| # giving up. The minimum number of retries is 1. |
| # |
| # Possible values: |
| # |
| # * Any positive integer |
| # |
| # Related options: |
| # |
| # * ebtables_retry_interval |
| # (integer value) |
| # Minimum value: 1 |
| #ebtables_exec_attempts=3 |
| |
| # |
| # This option determines the time, in seconds, that the system will sleep in |
| # between ebtables retries. Note that each successive retry waits a multiple of |
| # this value, so for example, if this is set to the default of 1.0 seconds, and |
| # ebtables_exec_attempts is 4, after the first failure, the system will sleep |
| # for |
| # 1 * 1.0 seconds, after the second failure it will sleep 2 * 1.0 seconds, and |
| # after the third failure it will sleep 3 * 1.0 seconds. |
| # |
| # Possible values: |
| # |
| # * Any non-negative float or integer. Setting this to zero will result in |
| # no |
| # waiting between attempts. |
| # |
| # Related options: |
| # |
| # * ebtables_exec_attempts |
| # (floating point value) |
| #ebtables_retry_interval=1.0 |
| |
| # |
| # This option determines whether the network setup information is injected into |
| # the VM before it is booted. While it was originally designed to be used only |
| # by |
| # nova-network, it is also used by the vmware and xenapi virt drivers to control |
| # whether network information is injected into a VM. |
| # (boolean value) |
| #flat_injected=false |
| |
| # DEPRECATED: |
| # This option determines the bridge used for simple network interfaces when no |
| # bridge is specified in the VM creation request. |
| # |
| # Please note that this option is only used when using nova-network instead of |
| # Neutron in your deployment. |
| # |
| # Possible values: |
| # |
| # Any string representing a valid network bridge, such as 'br100' |
| # |
| # Related options: |
| # |
| # ``use_neutron`` |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #flat_network_bridge=<None> |
| |
| # DEPRECATED: |
| # This is the address of the DNS server for a simple network. If this option is |
| # not specified, the default of '8.8.4.4' is used. |
| # |
| # Please note that this option is only used when using nova-network instead of |
| # Neutron in your deployment. |
| # |
| # Possible values: |
| # |
| # Any valid IP address. |
| # |
| # Related options: |
| # |
| # ``use_neutron`` |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #flat_network_dns=8.8.4.4 |
| |
| # DEPRECATED: |
| # This option is the name of the virtual interface of the VM on which the bridge |
| # will be built. While it was originally designed to be used only by |
| # nova-network, it is also used by libvirt for the bridge interface name. |
| # |
| # Possible values: |
| # |
| # Any valid virtual interface name, such as 'eth0' |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #flat_interface=<None> |
| |
| # DEPRECATED: |
| # This is the VLAN number used for private networks. Note that the when creating |
| # the networks, if the specified number has already been assigned, nova-network |
| # will increment this number until it finds an available VLAN. |
| # |
| # Please note that this option is only used when using nova-network instead of |
| # Neutron in your deployment. It also will be ignored if the configuration |
| # option |
| # for `network_manager` is not set to the default of |
| # 'nova.network.manager.VlanManager'. |
| # |
| # Possible values: |
| # |
| # Any integer between 1 and 4094. Values outside of that range will raise a |
| # ValueError exception. Default = 100. |
| # |
| # Related options: |
| # |
| # ``network_manager``, ``use_neutron`` |
| # (integer value) |
| # Minimum value: 1 |
| # Maximum value: 4094 |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #vlan_start=100 |
| |
| # DEPRECATED: |
| # This option is the name of the virtual interface of the VM on which the VLAN |
| # bridge will be built. While it was originally designed to be used only by |
| # nova-network, it is also used by libvirt and xenapi for the bridge interface |
| # name. |
| # |
| # Please note that this setting will be ignored in nova-network if the |
| # configuration option for `network_manager` is not set to the default of |
| # 'nova.network.manager.VlanManager'. |
| # |
| # Possible values: |
| # |
| # Any valid virtual interface name, such as 'eth0' |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
# nova-network is deprecated, as are any related configuration options. While
# this option has an effect when using neutron, it incorrectly overrides the
# value provided by neutron and should therefore not be used.
| #vlan_interface=<None> |
| |
| # DEPRECATED: |
| # This option represents the number of networks to create if not explicitly |
| # specified when the network is created. The only time this is used is if a CIDR |
| # is specified, but an explicit network_size is not. In that case, the subnets |
# are created by dividing the IP address space of the CIDR by num_networks. The
| # resulting subnet sizes cannot be larger than the configuration option |
| # `network_size`; in that event, they are reduced to `network_size`, and a |
| # warning is logged. |
| # |
| # Please note that this option is only used when using nova-network instead of |
| # Neutron in your deployment. |
| # |
| # Possible values: |
| # |
| # Any positive integer is technically valid, although there are practical |
| # limits based upon available IP address space and virtual interfaces. The |
| # default is 1. |
| # |
| # Related options: |
| # |
| # ``use_neutron``, ``network_size`` |
| # (integer value) |
| # Minimum value: 1 |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #num_networks=1 |
| |
| # DEPRECATED: |
| # This is the public IP address for the cloudpipe VPN servers. It defaults to |
| # the |
| # IP address of the host. |
| # |
| # Please note that this option is only used when using nova-network instead of |
| # Neutron in your deployment. It also will be ignored if the configuration |
| # option |
| # for `network_manager` is not set to the default of |
| # 'nova.network.manager.VlanManager'. |
| # |
| # Possible values: |
| # |
| # Any valid IP address. The default is $my_ip, the IP address of the VM. |
| # |
| # Related options: |
| # |
| # ``network_manager``, ``use_neutron``, ``vpn_start`` |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #vpn_ip=$my_ip |
| |
| # DEPRECATED: |
| # This is the port number to use as the first VPN port for private networks. |
| # |
| # Please note that this option is only used when using nova-network instead of |
| # Neutron in your deployment. It also will be ignored if the configuration |
| # option |
| # for `network_manager` is not set to the default of |
# 'nova.network.manager.VlanManager', or if you specify a value for the
# 'vpn_start' parameter when creating a network.
| # |
| # Possible values: |
| # |
| # Any integer representing a valid port number. The default is 1000. |
| # |
| # Related options: |
| # |
| # ``use_neutron``, ``vpn_ip``, ``network_manager`` |
| # (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #vpn_start=1000 |
| |
| # DEPRECATED: |
| # This option determines the number of addresses in each private subnet. |
| # |
| # Please note that this option is only used when using nova-network instead of |
| # Neutron in your deployment. |
| # |
| # Possible values: |
| # |
| # Any positive integer that is less than or equal to the available network |
| # size. Note that if you are creating multiple networks, they must all fit |
| # in |
| # the available IP address space. The default is 256. |
| # |
| # Related options: |
| # |
| # ``use_neutron``, ``num_networks`` |
| # (integer value) |
| # Minimum value: 1 |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #network_size=256 |
| |
| # DEPRECATED: |
| # This option determines the fixed IPv6 address block when creating a network. |
| # |
| # Please note that this option is only used when using nova-network instead of |
| # Neutron in your deployment. |
| # |
| # Possible values: |
| # |
| # Any valid IPv6 CIDR. The default value is "fd00::/48". |
| # |
| # Related options: |
| # |
| # ``use_neutron`` |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #fixed_range_v6=fd00::/48 |
| |
| # DEPRECATED: |
| # This is the default IPv4 gateway. It is used only in the testing suite. |
| # |
| # Please note that this option is only used when using nova-network instead of |
| # Neutron in your deployment. |
| # |
| # Possible values: |
| # |
| # Any valid IP address. |
| # |
| # Related options: |
| # |
| # ``use_neutron``, ``gateway_v6`` |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #gateway=<None> |
| |
| # DEPRECATED: |
| # This is the default IPv6 gateway. It is used only in the testing suite. |
| # |
| # Please note that this option is only used when using nova-network instead of |
| # Neutron in your deployment. |
| # |
| # Possible values: |
| # |
| # Any valid IP address. |
| # |
| # Related options: |
| # |
| # ``use_neutron``, ``gateway`` |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #gateway_v6=<None> |
| |
| # DEPRECATED: |
| # This option represents the number of IP addresses to reserve at the top of the |
| # address range for VPN clients. It also will be ignored if the configuration |
| # option for `network_manager` is not set to the default of |
| # 'nova.network.manager.VlanManager'. |
| # |
| # Possible values: |
| # |
| # Any integer, 0 or greater. The default is 0. |
| # |
| # Related options: |
| # |
| # ``use_neutron``, ``network_manager`` |
| # (integer value) |
| # Minimum value: 0 |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #cnt_vpn_clients=0 |
| |
| # DEPRECATED: |
| # This is the number of seconds to wait before disassociating a deallocated |
| # fixed |
| # IP address. This is only used with the nova-network service, and has no effect |
| # when using neutron for networking. |
| # |
| # Possible values: |
| # |
| # Any integer, zero or greater. The default is 600 (10 minutes). |
| # |
| # Related options: |
| # |
| # ``use_neutron`` |
| # (integer value) |
| # Minimum value: 0 |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #fixed_ip_disassociate_timeout=600 |
| |
| # DEPRECATED: |
| # This option determines how many times nova-network will attempt to create a |
| # unique MAC address before giving up and raising a |
| # `VirtualInterfaceMacAddressException` error. |
| # |
| # Possible values: |
| # |
| # Any positive integer. The default is 5. |
| # |
| # Related options: |
| # |
| # ``use_neutron`` |
| # (integer value) |
| # Minimum value: 1 |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #create_unique_mac_address_attempts=5 |
| |
| # DEPRECATED: |
| # Determines whether unused gateway devices, both VLAN and bridge, are deleted |
| # if |
| # the network is in nova-network VLAN mode and is multi-hosted. |
| # |
| # Related options: |
| # |
| # ``use_neutron``, ``vpn_ip``, ``fake_network`` |
| # (boolean value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #teardown_unused_network_gateway=false |
| |
| # DEPRECATED: |
| # When this option is True, a call is made to release the DHCP for the instance |
| # when that instance is terminated. |
| # |
| # Related options: |
| # |
| # ``use_neutron`` |
| # (boolean value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| force_dhcp_release=true |
| |
| # DEPRECATED: |
| # When this option is True, whenever a DNS entry must be updated, a fanout cast |
| # message is sent to all network hosts to update their DNS entries in multi-host |
| # mode. |
| # |
| # Related options: |
| # |
| # ``use_neutron`` |
| # (boolean value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #update_dns_entries=false |
| |
| # DEPRECATED: |
| # This option determines the time, in seconds, to wait between refreshing DNS |
| # entries for the network. |
| # |
| # Possible values: |
| # |
| # Either -1 (default), or any positive integer. A negative value will |
| # disable |
| # the updates. |
| # |
| # Related options: |
| # |
| # ``use_neutron`` |
| # (integer value) |
| # Minimum value: -1 |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #dns_update_periodic_interval=-1 |
| |
| # DEPRECATED: |
| # This option allows you to specify the domain for the DHCP server. |
| # |
| # Possible values: |
| # |
| # Any string that is a valid domain name. |
| # |
| # Related options: |
| # |
| # ``use_neutron`` |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #dhcp_domain=novalocal |
| |
| # DEPRECATED: |
| # This option allows you to specify the L3 management library to be used. |
| # |
| # Possible values: |
| # |
| # Any dot-separated string that represents the import path to an L3 |
| # networking library. |
| # |
| # Related options: |
| # |
| # ``use_neutron`` |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #l3_lib=nova.network.l3.LinuxNetL3 |
| |
| # DEPRECATED: |
| # THIS VALUE SHOULD BE SET WHEN CREATING THE NETWORK. |
| # |
| # If True in multi_host mode, all compute hosts share the same dhcp address. The |
| # same IP address used for DHCP will be added on each nova-network node which is |
| # only visible to the VMs on the same host. |
| # |
| # The use of this configuration has been deprecated and may be removed in any |
| # release after Mitaka. It is recommended that instead of relying on this |
| # option, |
| # an explicit value should be passed to 'create_networks()' as a keyword |
| # argument |
| # with the name 'share_address'. |
| # (boolean value) |
| # This option is deprecated for removal since 2014.2. |
| # Its value may be silently ignored in the future. |
| #share_dhcp_address=false |
| |
# DEPRECATED: Whether to use Neutron or Nova Network as the back end for
# networking. Defaults to False (indicating Nova network). Set to True to use
# neutron. (boolean value)
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #use_neutron=true |
| |
| # |
| # URL for LDAP server which will store DNS entries |
| # |
| # Possible values: |
| # |
| # * A valid LDAP URL representing the server |
| # (uri value) |
| #ldap_dns_url=ldap://ldap.example.com:389 |
| |
| # Bind user for LDAP server (string value) |
| #ldap_dns_user=uid=admin,ou=people,dc=example,dc=org |
| |
| # Bind user's password for LDAP server (string value) |
| #ldap_dns_password=password |
| |
| # |
| # Hostmaster for LDAP DNS driver Statement of Authority |
| # |
| # Possible values: |
| # |
| # * Any valid string representing LDAP DNS hostmaster. |
| # (string value) |
| #ldap_dns_soa_hostmaster=hostmaster@example.org |
| |
| # |
| # DNS Servers for LDAP DNS driver |
| # |
| # Possible values: |
| # |
| # * A valid URL representing a DNS server |
| # (multi valued) |
| #ldap_dns_servers=dns.example.org |
| |
| # |
| # Base distinguished name for the LDAP search query |
| # |
| # This option helps to decide where to look up the host in LDAP. |
| # (string value) |
| #ldap_dns_base_dn=ou=hosts,dc=example,dc=org |
| |
| # |
| # Refresh interval (in seconds) for LDAP DNS driver Start of Authority |
| # |
| # Time interval, a secondary/slave DNS server waits before requesting for |
| # primary DNS server's current SOA record. If the records are different, |
| # secondary DNS server will request a zone transfer from primary. |
| # |
| # NOTE: Lower values would cause more traffic. |
| # (integer value) |
| #ldap_dns_soa_refresh=1800 |
| |
| # |
| # Retry interval (in seconds) for LDAP DNS driver Start of Authority |
| # |
| # Time interval, a secondary/slave DNS server should wait, if an |
| # attempt to transfer zone failed during the previous refresh interval. |
| # (integer value) |
| #ldap_dns_soa_retry=3600 |
| |
| # |
| # Expiry interval (in seconds) for LDAP DNS driver Start of Authority |
| # |
| # Time interval, a secondary/slave DNS server holds the information |
| # before it is no longer considered authoritative. |
| # (integer value) |
| #ldap_dns_soa_expiry=86400 |
| |
| # |
| # Minimum interval (in seconds) for LDAP DNS driver Start of Authority |
| # |
# It is the minimum time-to-live that applies to all resource records in the
# zone file. This value tells other servers how long they should keep the
# data in cache.
| # (integer value) |
| #ldap_dns_soa_minimum=7200 |
| |
| # DEPRECATED: The topic network nodes listen on (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # There is no need to let users choose the RPC topic for all services - there |
| # is little gain from this. Furthermore, it makes it really easy to break Nova |
| # by using this option. |
| #network_topic=network |
| |
| # DEPRECATED: |
| # Default value for multi_host in networks. |
| # |
| # nova-network service can operate in a multi-host or single-host mode. |
| # In multi-host mode each compute node runs a copy of nova-network and the |
| # instances on that compute node use the compute node as a gateway to the |
# Internet. Whereas in single-host mode, a central server runs the nova-network
| # service. All compute nodes forward traffic from the instances to the |
| # cloud controller which then forwards traffic to the Internet. |
| # |
# If this option is set to true, some rpc network calls will be sent directly
# to the host.
| # |
| # Note that this option is only used when using nova-network instead of |
| # Neutron in your deployment. |
| # |
| # Related options: |
| # |
| # * use_neutron |
| # (boolean value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #multi_host=false |
| |
| # DEPRECATED: |
| # Driver to use for network creation. |
| # |
| # Network driver initializes (creates bridges and so on) only when the |
| # first VM lands on a host node. All network managers configure the |
| # network using network drivers. The driver is not tied to any particular |
| # network manager. |
| # |
| # The default Linux driver implements vlans, bridges, and iptables rules |
| # using linux utilities. |
| # |
| # Note that this option is only used when using nova-network instead |
| # of Neutron in your deployment. |
| # |
| # Related options: |
| # |
| # * use_neutron |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #network_driver=nova.network.linux_net |
| |
| # |
| # Firewall driver to use with ``nova-network`` service. |
| # |
# This option only applies when using the ``nova-network`` service. When using
# another networking service, such as Neutron, this should be set to the
# ``nova.virt.firewall.NoopFirewallDriver``.
| # |
| # If unset (the default), this will default to the hypervisor-specified |
| # default driver. |
| # |
| # Possible values: |
| # |
| # * nova.virt.firewall.IptablesFirewallDriver |
| # * nova.virt.firewall.NoopFirewallDriver |
| # * nova.virt.libvirt.firewall.IptablesFirewallDriver |
| # * [...] |
| # |
| # Related options: |
| # |
| # * ``use_neutron``: This must be set to ``False`` to enable ``nova-network`` |
| # networking |
| # (string value) |
| #firewall_driver=<None> |
| firewall_driver=nova.virt.firewall.NoopFirewallDriver |
| |
| # |
| # Determine whether to allow network traffic from same network. |
| # |
| # When set to true, hosts on the same subnet are not filtered and are allowed |
| # to pass all types of traffic between them. On a flat network, this allows |
| # all instances from all projects unfiltered communication. With VLAN |
| # networking, this allows access between instances within the same project. |
| # |
# This option only applies when using the ``nova-network`` service. When using
# another networking service, such as Neutron, security groups or other
# approaches should be used.
| # |
| # Possible values: |
| # |
# * True: Network traffic should be allowed to pass between all instances on
#   the same network, regardless of their tenant and security policies
# * False: Network traffic should not be allowed to pass between instances
#   unless it is unblocked in a security group
| # |
| # Related options: |
| # |
| # * ``use_neutron``: This must be set to ``False`` to enable ``nova-network`` |
| # networking |
| # * ``firewall_driver``: This must be set to |
| # ``nova.virt.libvirt.firewall.IptablesFirewallDriver`` to ensure the |
| # libvirt firewall driver is enabled. |
| # (boolean value) |
| #allow_same_net_traffic=true |
| |
| # |
| # Filename that will be used for storing websocket frames received |
| # and sent by a proxy service (like VNC, spice, serial) running on this host. |
| # If this is not set, no recording will be done. |
| # (string value) |
| #record=<None> |
| |
| # Run as a background process. (boolean value) |
| #daemon=false |
| |
| # Disallow non-encrypted connections. (boolean value) |
| #ssl_only=false |
| {%- if controller.novncproxy.tls.get('enabled', False) %} |
| ssl_only=True |
| cert={{controller.novncproxy.tls.server.cert_file|yaml_squote}} |
| key={{controller.novncproxy.tls.server.key_file|yaml_squote}} |
| {%- endif %} |
| |
| # Set to True if source host is addressed with IPv6. (boolean value) |
| #source_is_ipv6=false |
| |
| # Path to SSL certificate file. (string value) |
| #cert=self.pem |
| |
| # SSL key file (if separate from cert). (string value) |
| #key=<None> |
| |
| # |
| # Path to directory with content which will be served by a web server. |
| # (string value) |
| #web=/usr/share/spice-html5 |
| |
| # |
| # The directory where the Nova python modules are installed. |
| # |
| # This directory is used to store template files for networking and remote |
| # console access. It is also the default path for other config options which |
| # need to persist Nova internal data. It is very unlikely that you need to |
| # change this option from its default value. |
| # |
| # Possible values: |
| # |
| # * The full path to a directory. |
| # |
| # Related options: |
| # |
| # * ``state_path`` |
| # (string value) |
| #pybasedir=/build/nova-elxmSs/nova-15.0.2 |
| |
| # |
| # The directory where the Nova binaries are installed. |
| # |
| # This option is only relevant if the networking capabilities from Nova are |
| # used (see services below). Nova's networking capabilities are targeted to |
| # be fully replaced by Neutron in the future. It is very unlikely that you need |
| # to change this option from its default value. |
| # |
| # Possible values: |
| # |
| # * The full path to a directory. |
| # (string value) |
| #bindir=/usr/local/bin |
| |
| # |
| # The top-level directory for maintaining Nova's state. |
| # |
| # This directory is used to store Nova's internal state. It is used by a |
| # variety of other config options which derive from this. In some scenarios |
| # (for example migrations) it makes sense to use a storage location which is |
| # shared between multiple compute hosts (for example via NFS). Unless the |
| # option ``instances_path`` gets overwritten, this directory can grow very |
| # large. |
| # |
| # Possible values: |
| # |
| # * The full path to a directory. Defaults to value provided in ``pybasedir``. |
| # (string value) |
| state_path=/var/lib/nova |
| |
| # |
| # Number of seconds indicating how frequently the state of services on a |
| # given hypervisor is reported. Nova needs to know this to determine the |
| # overall health of the deployment. |
| # |
| # Related Options: |
| # |
| # * service_down_time |
| # report_interval should be less than service_down_time. If service_down_time |
| # is less than report_interval, services will routinely be considered down, |
| # because they report in too rarely. |
| # (integer value) |
| #report_interval=10 |
| report_interval=10 |
| |
| # |
| # Maximum time in seconds since last check-in for up service |
| # |
| # Each compute node periodically updates their database status based on the |
| # specified report interval. If the compute node hasn't updated the status |
| # for more than service_down_time, then the compute node is considered down. |
| # |
| # Related Options: |
| # |
| # * report_interval (service_down_time should not be less than report_interval) |
| # (integer value) |
| service_down_time = {{ controller.service_down_time|default('180') }} |
| |
| # |
| # Enable periodic tasks. |
| # |
| # If set to true, this option allows services to periodically run tasks |
| # on the manager. |
| # |
| # In case of running multiple schedulers or conductors you may want to run |
| # periodic tasks on only one host - in this case disable this option for all |
| # hosts but one. |
| # (boolean value) |
| #periodic_enable=true |
| |
| # |
| # Number of seconds to randomly delay when starting the periodic task |
| # scheduler to reduce stampeding. |
| # |
| # When compute workers are restarted in unison across a cluster, |
| # they all end up running the periodic tasks at the same time |
| # causing problems for the external services. To mitigate this |
| # behavior, periodic_fuzzy_delay option allows you to introduce a |
| # random initial delay when starting the periodic task scheduler. |
| # |
| # Possible Values: |
| # |
| # * Any positive integer (in seconds) |
| # * 0 : disable the random delay |
| # (integer value) |
| # Minimum value: 0 |
| #periodic_fuzzy_delay=60 |
| |
| # List of APIs to be enabled by default. (list value) |
| enabled_apis=osapi_compute,metadata |
| |
| # |
| # List of APIs with enabled SSL. |
| # |
| # Nova provides SSL support for the API servers. enabled_ssl_apis option |
| # allows configuring the SSL support. |
| # (list value) |
| #enabled_ssl_apis = |
| |
| # |
| # IP address on which the OpenStack API will listen. |
| # |
| # The OpenStack API service listens on this IP address for incoming |
| # requests. |
| # (string value) |
| #osapi_compute_listen=0.0.0.0 |
| osapi_compute_listen={{ controller.bind.private_address }} |
| |
| # |
| # Port on which the OpenStack API will listen. |
| # |
| # The OpenStack API service listens on this port number for incoming |
| # requests. |
| # (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| #osapi_compute_listen_port=8774 |
| |
| # |
| # Number of workers for OpenStack API service. The default will be the number |
| # of CPUs available. |
| # |
| # OpenStack API services can be configured to run as multi-process (workers). |
| # This overcomes the problem of reduction in throughput when API request |
| # concurrency increases. OpenStack API service will run in the specified |
| # number of processes. |
| # |
| # Possible Values: |
| # |
| # * Any positive integer |
| # * None (default value) |
| # (integer value) |
| # Minimum value: 1 |
| #osapi_compute_workers=<None> |
| osapi_compute_workers = {{ controller.workers }} |
| |
| # |
| # IP address on which the metadata API will listen. |
| # |
| # The metadata API service listens on this IP address for incoming |
| # requests. |
| # (string value) |
| #metadata_listen=0.0.0.0 |
| metadata_listen={{ controller.get('metadata', {}).get('bind', {}).get('address', controller.bind.private_address) }} |
| osapi_volume_listen={{ controller.bind.private_address }} |
| |
| # |
| # Port on which the metadata API will listen. |
| # |
| # The metadata API service listens on this port number for incoming |
| # requests. |
| # (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| {%- if controller.get('metadata', {}).get('bind', {}).port is defined %} |
| metadata_listen_port={{ controller.metadata.bind.port }} |
| {%- else %} |
| #metadata_listen_port=8775 |
| {%- endif %} |
| |
| # |
| # Number of workers for metadata service. If not specified the number of |
| # available CPUs will be used. |
| # |
| # The metadata service can be configured to run as multi-process (workers). |
| # This overcomes the problem of reduction in throughput when API request |
| # concurrency increases. The metadata service will run in the specified |
| # number of processes. |
| # |
| # Possible Values: |
| # |
| # * Any positive integer |
| # * None (default value) |
| # (integer value) |
| # Minimum value: 1 |
| #metadata_workers=<None> |
| metadata_workers = {{ controller.workers }} |
| |
| # Full class name for the Manager for network (string value) |
| # Allowed values: nova.network.manager.FlatManager, nova.network.manager.FlatDHCPManager, nova.network.manager.VlanManager |
| #network_manager=nova.network.manager.VlanManager |
| |
| # |
| # This option specifies the driver to be used for the servicegroup service. |
| # |
| # ServiceGroup API in nova enables checking status of a compute node. When a |
| # compute worker running the nova-compute daemon starts, it calls the join API |
| # to join the compute group. Services like nova scheduler can query the |
| # ServiceGroup API to check if a node is alive. Internally, the ServiceGroup |
| # client driver automatically updates the compute worker status. There are |
| # multiple backend implementations for this service: Database ServiceGroup |
| # driver and Memcache ServiceGroup driver. |
| # |
| # Possible Values: |
| # |
| # * db : Database ServiceGroup driver |
| # * mc : Memcache ServiceGroup driver |
| # |
| # Related Options: |
| # |
| # * service_down_time (maximum time since last check-in for up service) |
| # (string value) |
| # Allowed values: db, mc |
| #servicegroup_driver=db |
| |
| # |
| # From oslo.log |
| # |
| |
| # If set to true, the logging level will be set to DEBUG instead of the default |
| # INFO level. (boolean value) |
| # Note: This option can be changed without restarting. |
| #debug=false |
| debug=false |
| |
| # DEPRECATED: If set to false, the logging level will be set to WARNING instead |
| # of the default INFO level. (boolean value) |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| #verbose=true |
| verbose=true |
| |
| # The name of a logging configuration file. This file is appended to any |
| # existing logging configuration files. For details about logging configuration |
| # files, see the Python logging module documentation. Note that when logging |
| # configuration files are used then all logging configuration is set in the |
| # configuration file and other logging configuration options are ignored (for |
| # example, logging_context_format_string). (string value) |
| # Note: This option can be changed without restarting. |
| # Deprecated group/name - [DEFAULT]/log_config |
| {%- if controller.logging.log_appender %} |
| log_config_append=/etc/nova/logging.conf |
| {%- endif %} |
| |
| # Defines the format string for %%(asctime)s in log records. Default: |
| # %(default)s. This option is ignored if log_config_append is set. (string |
| # value) |
| #log_date_format=%Y-%m-%d %H:%M:%S |
| |
| # (Optional) Name of log file to send logging output to. If no default is set, |
| # logging will go to stderr as defined by use_stderr. This option is ignored if |
| # log_config_append is set. (string value) |
| # Deprecated group/name - [DEFAULT]/logfile |
| #log_file=<None> |
| |
| # (Optional) The base directory used for relative log_file paths. This option |
| # is ignored if log_config_append is set. (string value) |
| # Deprecated group/name - [DEFAULT]/logdir |
| log_dir=/var/log/nova |
| |
| # Uses logging handler designed to watch file system. When log file is moved or |
| # removed this handler will open a new log file with specified path |
| # instantaneously. It makes sense only if log_file option is specified and Linux |
| # platform is used. This option is ignored if log_config_append is set. (boolean |
| # value) |
| #watch_log_file=false |
| |
| # Use syslog for logging. Existing syslog format is DEPRECATED and will be |
| # changed later to honor RFC5424. This option is ignored if log_config_append is |
| # set. (boolean value) |
| #use_syslog=false |
| |
| # Syslog facility to receive log lines. This option is ignored if |
| # log_config_append is set. (string value) |
| #syslog_log_facility=LOG_USER |
| |
| # Log output to standard error. This option is ignored if log_config_append is |
| # set. (boolean value) |
| #use_stderr=false |
| |
| # Format string to use for log messages with context. (string value) |
| #logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s |
| |
| # Format string to use for log messages when context is undefined. (string |
| # value) |
| #logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s |
| |
| # Additional data to append to log message when logging level for the message is |
| # DEBUG. (string value) |
| #logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d |
| |
| # Prefix each line of exception output with this format. (string value) |
| #logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s |
| |
| # Defines the format string for %(user_identity)s that is used in |
| # logging_context_format_string. (string value) |
| #logging_user_identity_format=%(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s |
| |
| # List of package logging levels in logger=LEVEL pairs. This option is ignored |
| # if log_config_append is set. (list value) |
| #default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO |
| |
| # Enables or disables publication of error events. (boolean value) |
| #publish_errors=false |
| |
| # The format for an instance that is passed with the log message. (string value) |
| #instance_format="[instance: %(uuid)s] " |
| |
| # The format for an instance UUID that is passed with the log message. (string |
| # value) |
| #instance_uuid_format="[instance: %(uuid)s] " |
| |
| # Interval, number of seconds, of log rate limiting. (integer value) |
| #rate_limit_interval=0 |
| |
| # Maximum number of logged messages per rate_limit_interval. (integer value) |
| #rate_limit_burst=0 |
| |
| # Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG or |
| # empty string. Logs with level greater or equal to rate_limit_except_level are |
| # not filtered. An empty string means that all levels are filtered. (string |
| # value) |
| #rate_limit_except_level=CRITICAL |
| |
| # Enables or disables fatal status of deprecations. (boolean value) |
| #fatal_deprecations=false |
| |
| # |
| # From oslo.messaging |
| # |
| |
| # Size of RPC connection pool. (integer value) |
| # Deprecated group/name - [DEFAULT]/rpc_conn_pool_size |
| #rpc_conn_pool_size=30 |
| {%- if controller.message_queue.rpc_conn_pool_size is defined %} |
| rpc_conn_pool_size = {{ controller.message_queue.rpc_conn_pool_size }} |
| {%- endif %} |
| |
| # The pool size limit for connections expiration policy (integer value) |
| #conn_pool_min_size=2 |
| {%- if controller.message_queue.conn_pool_min_size is defined %} |
| conn_pool_min_size = {{ controller.message_queue.conn_pool_min_size }} |
| {%- endif %} |
| |
| # The time-to-live in sec of idle connections in the pool (integer value) |
| #conn_pool_ttl=1200 |
| {%- if controller.message_queue.conn_pool_ttl is defined %} |
| conn_pool_ttl = {{ controller.message_queue.conn_pool_ttl }} |
| {%- endif %} |
| |
| # ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. |
| # The "host" option should point or resolve to this address. (string value) |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_bind_address |
| #rpc_zmq_bind_address=* |
| |
| # MatchMaker driver. (string value) |
| # Allowed values: redis, sentinel, dummy |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_matchmaker |
| #rpc_zmq_matchmaker=redis |
| |
| # Number of ZeroMQ contexts, defaults to 1. (integer value) |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_contexts |
| #rpc_zmq_contexts=1 |
| |
| # Maximum number of ingress messages to locally buffer per topic. Default is |
| # unlimited. (integer value) |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_topic_backlog |
| #rpc_zmq_topic_backlog=<None> |
| |
| # Directory for holding IPC sockets. (string value) |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_ipc_dir |
| #rpc_zmq_ipc_dir=/var/run/openstack |
| |
| # Name of this node. Must be a valid hostname, FQDN, or IP address. Must match |
| # "host" option, if running Nova. (string value) |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_host |
| #rpc_zmq_host=localhost |
| |
| # Number of seconds to wait before all pending messages will be sent after |
| # closing a socket. The default value of -1 specifies an infinite linger period. |
| # The value of 0 specifies no linger period. Pending messages shall be discarded |
| # immediately when the socket is closed. Positive values specify an upper bound |
| # for the linger period. (integer value) |
| # Deprecated group/name - [DEFAULT]/rpc_cast_timeout |
| #zmq_linger=-1 |
| zmq_linger=30 |
| |
| # The default number of seconds that poll should wait. Poll raises timeout |
| # exception when timeout expired. (integer value) |
| # Deprecated group/name - [DEFAULT]/rpc_poll_timeout |
| #rpc_poll_timeout=1 |
| {%- if controller.message_queue.rpc_poll_timeout is defined %} |
| rpc_poll_timeout = {{ controller.message_queue.rpc_poll_timeout }} |
| {%- endif %} |
| |
| |
| # Expiration timeout in seconds of a name service record about an existing |
| # target (< 0 means no timeout). (integer value) |
| # Deprecated group/name - [DEFAULT]/zmq_target_expire |
| #zmq_target_expire=300 |
| |
| # Update period in seconds of a name service record about existing target. |
| # (integer value) |
| # Deprecated group/name - [DEFAULT]/zmq_target_update |
| #zmq_target_update=180 |
| |
| # Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean |
| # value) |
| # Deprecated group/name - [DEFAULT]/use_pub_sub |
| #use_pub_sub=false |
| |
| # Use ROUTER remote proxy. (boolean value) |
| # Deprecated group/name - [DEFAULT]/use_router_proxy |
| #use_router_proxy=false |
| |
| # This option makes direct connections dynamic or static. It makes sense only |
| # with use_router_proxy=False which means to use direct connections for direct |
| # message types (ignored otherwise). (boolean value) |
| #use_dynamic_connections=false |
| |
| # How many additional connections to a host will be made for failover reasons. |
| # This option is actual only in dynamic connections mode. (integer value) |
| #zmq_failover_connections=2 |
| |
| # Minimal port number for random ports range. (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_min_port |
| #rpc_zmq_min_port=49153 |
| |
| # Maximal port number for random ports range. (integer value) |
| # Minimum value: 1 |
| # Maximum value: 65536 |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_max_port |
| #rpc_zmq_max_port=65536 |
| |
| # Number of retries to find free port number before fail with ZMQBindError. |
| # (integer value) |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_bind_port_retries |
| #rpc_zmq_bind_port_retries=100 |
| |
| # Default serialization mechanism for serializing/deserializing |
| # outgoing/incoming messages (string value) |
| # Allowed values: json, msgpack |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_serialization |
| #rpc_zmq_serialization=json |
| |
| # This option configures round-robin mode in zmq socket. True means not keeping |
| # a queue when server side disconnects. False means to keep queue and messages |
| # even if server is disconnected, when the server appears we send all |
| # accumulated messages to it. (boolean value) |
| #zmq_immediate=true |
| |
| # Enable/disable TCP keepalive (KA) mechanism. The default value of -1 (or any |
| # other negative value) means to skip any overrides and leave it to OS default; |
| # 0 and 1 (or any other positive value) mean to disable and enable the option |
| # respectively. (integer value) |
| #zmq_tcp_keepalive=-1 |
| |
| # The duration between two keepalive transmissions in idle condition. The unit |
| # is platform dependent, for example, seconds in Linux, milliseconds in Windows |
| # etc. The default value of -1 (or any other negative value and 0) means to skip |
| # any overrides and leave it to OS default. (integer value) |
| #zmq_tcp_keepalive_idle=-1 |
| |
| # The number of retransmissions to be carried out before declaring that remote |
| # end is not available. The default value of -1 (or any other negative value and |
| # 0) means to skip any overrides and leave it to OS default. (integer value) |
| #zmq_tcp_keepalive_cnt=-1 |
| |
| # The duration between two successive keepalive retransmissions, if |
| # acknowledgement to the previous keepalive transmission is not received. The |
| # unit is platform dependent, for example, seconds in Linux, milliseconds in |
| # Windows etc. The default value of -1 (or any other negative value and 0) means |
| # to skip any overrides and leave it to OS default. (integer value) |
| #zmq_tcp_keepalive_intvl=-1 |
| |
| # Maximum number of (green) threads to work concurrently. (integer value) |
| #rpc_thread_pool_size=100 |
| {%- if controller.message_queue.rpc_thread_pool_size is defined %} |
| rpc_thread_pool_size = {{ controller.message_queue.rpc_thread_pool_size }} |
| {%- endif %} |
| |
| # Expiration timeout in seconds of a sent/received message after which it is not |
| # tracked anymore by a client/server. (integer value) |
| #rpc_message_ttl=300 |
| {%- if controller.message_queue.rpc_message_ttl is defined %} |
| rpc_message_ttl = {{ controller.message_queue.rpc_message_ttl }} |
| {%- endif %} |
| |
| # Wait for message acknowledgements from receivers. This mechanism works only |
| # via proxy without PUB/SUB. (boolean value) |
| #rpc_use_acks=false |
| {%- if controller.message_queue.rpc_use_acks is defined %} |
| rpc_use_acks = {{ controller.message_queue.rpc_use_acks }} |
| {%- endif %} |
| |
| |
| # Number of seconds to wait for an ack from a cast/call. After each retry |
| # attempt this timeout is multiplied by some specified multiplier. (integer |
| # value) |
| #rpc_ack_timeout_base=15 |
| {%- if controller.message_queue.rpc_ack_timeout_base is defined %} |
| rpc_ack_timeout_base = {{ controller.message_queue.rpc_ack_timeout_base }} |
| {%- endif %} |
| |
| |
| # Number to multiply base ack timeout by after each retry attempt. (integer |
| # value) |
| #rpc_ack_timeout_multiplier=2 |
| {%- if controller.message_queue.rpc_ack_timeout_multiplier is defined %} |
| rpc_ack_timeout_multiplier = {{ controller.message_queue.rpc_ack_timeout_multiplier }} |
| {%- endif %} |
| |
| |
| # Default number of message sending attempts in case of any problems occurred: |
| # positive value N means at most N retries, 0 means no retries, None or -1 (or |
| # any other negative values) mean to retry forever. This option is used only if |
| # acknowledgments are enabled. (integer value) |
| #rpc_retry_attempts=3 |
| {%- if controller.message_queue.rpc_retry_attempts is defined %} |
| rpc_retry_attempts = {{ controller.message_queue.rpc_retry_attempts }} |
| {%- endif %} |
| |
| |
| # List of publisher hosts SubConsumer can subscribe on. This option has higher |
| # priority then the default publishers list taken from the matchmaker. (list |
| # value) |
| #subscribe_on = |
| |
| # Size of executor thread pool. (integer value) |
| # Deprecated group/name - [DEFAULT]/rpc_thread_pool_size |
| #executor_thread_pool_size=64 |
| {%- if controller.message_queue.executor_thread_pool_size is defined %} |
| executor_thread_pool_size = {{ controller.message_queue.executor_thread_pool_size }} |
| {%- endif %} |
| |
| |
| # Seconds to wait for a response from a call. (integer value) |
| #rpc_response_timeout=60 |
| {%- if controller.message_queue.rpc_response_timeout is defined %} |
| rpc_response_timeout = {{ controller.message_queue.rpc_response_timeout }} |
| {%- endif %} |
| |
| {%- set rabbit_port = controller.message_queue.get('port', 5671 if controller.message_queue.get('ssl',{}).get('enabled', False) else 5672) %} |
| |
| {%- if controller.message_queue.members is defined %} |
| transport_url = rabbit://{% for member in controller.message_queue.members -%} |
| {{ controller.message_queue.user }}:{{ controller.message_queue.password }}@{{ member.host }}:{{ member.get('port', rabbit_port) }} |
| {%- if not loop.last -%},{%- endif -%} |
| {%- endfor -%} |
| /{{ controller.message_queue.virtual_host }} |
| {%- else %} |
| transport_url = rabbit://{{ controller.message_queue.user }}:{{ controller.message_queue.password }}@{{ controller.message_queue.host }}:{{ rabbit_port }}/{{ controller.message_queue.virtual_host }} |
| {%- endif %} |
| |
| |
| # DEPRECATED: The messaging driver to use, defaults to rabbit. Other drivers |
| # include amqp and zmq. (string value) |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| # Reason: Replaced by [DEFAULT]/transport_url |
| #rpc_backend=rabbit |
| rpc_backend=rabbit |
| |
| # The default exchange under which topics are scoped. May be overridden by an |
| # exchange name specified in the transport_url option. (string value) |
| #control_exchange=openstack |
| |
| # |
| # From oslo.service.periodic_task |
| # |
| |
| # Some periodic tasks can be run in a separate process. Should we run them here? |
| # (boolean value) |
| #run_external_periodic_tasks=true |
| |
| # |
| # From oslo.service.service |
| # |
| |
| # Enable eventlet backdoor. Acceptable values are 0, <port>, and <start>:<end>, |
| # where 0 results in listening on a random tcp port number; <port> results in |
| # listening on the specified port number (and not enabling backdoor if that port |
| # is in use); and <start>:<end> results in listening on the smallest unused port |
| # number within the specified range of port numbers. The chosen port is |
| # displayed in the service's log file. (string value) |
| #backdoor_port=<None> |
| |
| # Enable eventlet backdoor, using the provided path as a unix socket that can |
| # receive connections. This option is mutually exclusive with 'backdoor_port' in |
| # that only one should be provided. If both are provided then the existence of |
| # this option overrides the usage of that option. (string value) |
| #backdoor_socket=<None> |
| |
| # Enables or disables logging values of all registered options when starting a |
| # service (at DEBUG level). (boolean value) |
| #log_options=true |
| |
| # Specify a timeout after which a gracefully shutdown server will exit. Zero |
| # value means endless wait. (integer value) |
| #graceful_shutdown_timeout=60 |
| |
| |
| [api] |
| # |
| # Options under this group are used to define Nova API. |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # This determines the strategy to use for authentication: keystone or noauth2. |
| # 'noauth2' is designed for testing only, as it does no actual credential |
| # checking. 'noauth2' provides administrative credentials only if 'admin' is |
| # specified as the username. |
| # (string value) |
| # Allowed values: keystone, noauth2 |
| # Deprecated group/name - [DEFAULT]/auth_strategy |
| #auth_strategy=keystone |
| auth_strategy=keystone |
| |
| # |
| # When True, the 'X-Forwarded-For' header is treated as the canonical remote |
| # address. When False (the default), the 'remote_address' header is used. |
| # |
| # You should only enable this if you have an HTML sanitizing proxy. |
| # (boolean value) |
| # Deprecated group/name - [DEFAULT]/use_forwarded_for |
| #use_forwarded_for=false |
| use_forwarded_for=false |
| |
| # |
| # When gathering the existing metadata for a config drive, the EC2-style |
| # metadata is returned for all versions that don't appear in this option. |
| # As of the Liberty release, the available versions are: |
| # |
| # * 1.0 |
| # * 2007-01-19 |
| # * 2007-03-01 |
| # * 2007-08-29 |
| # * 2007-10-10 |
| # * 2007-12-15 |
| # * 2008-02-01 |
| # * 2008-09-01 |
| # * 2009-04-04 |
| # |
| # The option is in the format of a single string, with each version separated |
| # by a space. |
| # |
| # Possible values: |
| # |
| # * Any string that represents zero or more versions, separated by spaces. |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/config_drive_skip_versions |
| #config_drive_skip_versions=1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01 |
| |
| # |
| # A list of vendordata providers. |
| # |
| # vendordata providers are how deployers can provide metadata via configdrive |
| # and metadata that is specific to their deployment. There are currently two |
| # supported providers: StaticJSON and DynamicJSON. |
| # |
| # StaticJSON reads a JSON file configured by the flag vendordata_jsonfile_path |
| # and places the JSON from that file into vendor_data.json and |
| # vendor_data2.json. |
| # |
| # DynamicJSON is configured via the vendordata_dynamic_targets flag, which is |
| # documented separately. For each of the endpoints specified in that flag, a |
| # section is added to the vendor_data2.json. |
| # |
| # For more information on the requirements for implementing a vendordata |
| # dynamic endpoint, please see the vendordata.rst file in the nova developer |
| # reference. |
| # |
| # Possible values: |
| # |
| # * A list of vendordata providers, with StaticJSON and DynamicJSON being |
| # current options. |
| # |
| # Related options: |
| # |
| # * vendordata_dynamic_targets |
| # * vendordata_dynamic_ssl_certfile |
| # * vendordata_dynamic_connect_timeout |
| # * vendordata_dynamic_read_timeout |
| # * vendordata_dynamic_failure_fatal |
| # (list value) |
| # Deprecated group/name - [DEFAULT]/vendordata_providers |
| #vendordata_providers = |
| |
| # |
| # A list of targets for the dynamic vendordata provider. These targets are of |
| # the form <name>@<url>. |
| # |
| # The dynamic vendordata provider collects metadata by contacting external REST |
| # services and querying them for information about the instance. This behaviour |
| # is documented in the vendordata.rst file in the nova developer reference. |
| # (list value) |
| # Deprecated group/name - [DEFAULT]/vendordata_dynamic_targets |
| #vendordata_dynamic_targets = |
| |
| # |
| # Path to an optional certificate file or CA bundle to verify dynamic |
| # vendordata REST services ssl certificates against. |
| # |
| # Possible values: |
| # |
| # * An empty string, or a path to a valid certificate file |
| # |
| # Related options: |
| # |
| # * vendordata_providers |
| # * vendordata_dynamic_targets |
| # * vendordata_dynamic_connect_timeout |
| # * vendordata_dynamic_read_timeout |
| # * vendordata_dynamic_failure_fatal |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/vendordata_dynamic_ssl_certfile |
| #vendordata_dynamic_ssl_certfile = |
| |
| # |
| # Maximum wait time for an external REST service to connect. |
| # |
| # Possible values: |
| # |
| # * Any integer with a value greater than three (the TCP packet retransmission |
| # timeout). Note that instance start may be blocked during this wait time, |
| # so this value should be kept small. |
| # |
| # Related options: |
| # |
| # * vendordata_providers |
| # * vendordata_dynamic_targets |
| # * vendordata_dynamic_ssl_certfile |
| # * vendordata_dynamic_read_timeout |
| # * vendordata_dynamic_failure_fatal |
| # (integer value) |
| # Minimum value: 3 |
| # Deprecated group/name - [DEFAULT]/vendordata_dynamic_connect_timeout |
| #vendordata_dynamic_connect_timeout=5 |
| |
| # |
| # Maximum wait time for an external REST service to return data once connected. |
| # |
| # Possible values: |
| # |
| # * Any integer. Note that instance start is blocked during this wait time, |
| # so this value should be kept small. |
| # |
| # Related options: |
| # |
| # * vendordata_providers |
| # * vendordata_dynamic_targets |
| # * vendordata_dynamic_ssl_certfile |
| # * vendordata_dynamic_connect_timeout |
| # * vendordata_dynamic_failure_fatal |
| # (integer value) |
| # Minimum value: 0 |
| # Deprecated group/name - [DEFAULT]/vendordata_dynamic_read_timeout |
| #vendordata_dynamic_read_timeout=5 |
| |
| # |
| # Should failures to fetch dynamic vendordata be fatal to instance boot? |
| # |
| # Related options: |
| # |
| # * vendordata_providers |
| # * vendordata_dynamic_targets |
| # * vendordata_dynamic_ssl_certfile |
| # * vendordata_dynamic_connect_timeout |
| # * vendordata_dynamic_read_timeout |
| # (boolean value) |
| #vendordata_dynamic_failure_fatal=false |
| |
| # |
| # This option is the time (in seconds) to cache metadata. When set to 0, |
| # metadata caching is disabled entirely; this is generally not recommended for |
| # performance reasons. Increasing this setting should improve response times |
| # of the metadata API when under heavy load. Higher values may increase memory |
| # usage, and result in longer times for host metadata changes to take effect. |
| # (integer value) |
| # Minimum value: 0 |
| # Deprecated group/name - [DEFAULT]/metadata_cache_expiration |
| #metadata_cache_expiration=15 |
| |
| # |
| # Cloud providers may store custom data in vendor data file that will then be |
| # available to the instances via the metadata service, and to the rendering of |
| # config-drive. The default class for this, JsonFileVendorData, loads this |
| # information from a JSON file, whose path is configured by this option. If |
| # there is no path set by this option, the class returns an empty dictionary. |
| # |
| # Possible values: |
| # |
| # * Any string representing the path to the data file, or an empty string |
| # (default). |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/vendordata_jsonfile_path |
| #vendordata_jsonfile_path=<None> |
| |
| # |
| # As a query can potentially return many thousands of items, you can limit the |
| # maximum number of items in a single response by setting this option. |
| # (integer value) |
| # Minimum value: 0 |
| # Deprecated group/name - [DEFAULT]/osapi_max_limit |
| #max_limit=1000 |
| max_limit={{ controller.osapi_max_limit|default('1000') }} |
| |
| # |
| # This string is prepended to the normal URL that is returned in links to the |
| # OpenStack Compute API. If it is empty (the default), the URLs are returned |
| # unchanged. |
| # |
| # Possible values: |
| # |
| # * Any string, including an empty string (the default). |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/osapi_compute_link_prefix |
| #compute_link_prefix=<None> |
| |
| # |
| # This string is prepended to the normal URL that is returned in links to |
| # Glance resources. If it is empty (the default), the URLs are returned |
| # unchanged. |
| # |
| # Possible values: |
| # |
| # * Any string, including an empty string (the default). |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/osapi_glance_link_prefix |
| #glance_link_prefix=<None> |
| |
| # |
| # Operators can turn off the ability for a user to take snapshots of their |
| # instances by setting this option to False. When disabled, any attempt to |
| # take a snapshot will result in a HTTP 400 response ("Bad Request"). |
| # (boolean value) |
| # Deprecated group/name - [DEFAULT]/allow_instance_snapshots |
| #allow_instance_snapshots=true |
| |
| # |
| # This option is a list of all instance states for which network address |
| # information should not be returned from the API. |
| # |
| # Possible values: |
| # |
| # A list of strings, where each string is a valid VM state, as defined in |
| # nova/compute/vm_states.py. As of the Newton release, they are: |
| # |
| # * "active" |
| # * "building" |
| # * "paused" |
| # * "suspended" |
| # * "stopped" |
| # * "rescued" |
| # * "resized" |
| # * "soft-delete" |
| # * "deleted" |
| # * "error" |
| # * "shelved" |
| # * "shelved_offloaded" |
| # (list value) |
| # Deprecated group/name - [DEFAULT]/osapi_hide_server_address_states |
| #hide_server_address_states=building |
| |
| # The full path to the fping binary. (string value) |
| # Deprecated group/name - [DEFAULT]/fping_path |
| #fping_path=/usr/sbin/fping |
| fping_path=/usr/sbin/fping |
| |
| # |
| # When True, the TenantNetworkController will query the Neutron API to get the |
| # default networks to use. |
| # |
| # Related options: |
| # |
| # * neutron_default_tenant_id |
| # (boolean value) |
| # Deprecated group/name - [DEFAULT]/use_neutron_default_nets |
| #use_neutron_default_nets=false |
| |
| # |
| # Tenant ID for getting the default network from Neutron API (also referred in |
| # some places as the 'project ID') to use. |
| # |
| # Related options: |
| # |
| # * use_neutron_default_nets |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/neutron_default_tenant_id |
| #neutron_default_tenant_id=default |
| |
| # |
| # Enables returning of the instance password by the relevant server API calls |
| # such as create, rebuild, evacuate, or rescue. If the hypervisor does not |
| # support password injection, then the password returned will not be correct, |
| # so if your hypervisor does not support password injection, set this to False. |
| # (boolean value) |
| # Deprecated group/name - [DEFAULT]/enable_instance_password |
| #enable_instance_password=true |
| |
| |
| [api_database] |
| # |
| # The *Nova API Database* is a separate database which is used for information |
| # which is used across *cells*. This database is mandatory since the Mitaka |
| # release (13.0.0). |
| |
| # |
| # From nova.conf |
| # |
| idle_timeout = {{ controller.database.get('idle_timeout', 180) }} |
| min_pool_size = {{ controller.database.get('min_pool_size', 100) }} |
| max_pool_size = {{ controller.database.get('max_pool_size', 700) }} |
| max_overflow = {{ controller.database.get('max_overflow', 100) }} |
| retry_interval = {{ controller.database.get('retry_interval', 5) }} |
| max_retries = {{ controller.database.get('max_retries', '-1') }} |
| db_max_retries = {{ controller.database.get('db_max_retries', 3) }} |
| db_retry_interval = {{ controller.database.get('db_retry_interval', 1) }} |
| connection_debug = {{ controller.database.get('connection_debug', 10) }} |
| pool_timeout = {{ controller.database.get('pool_timeout', 120) }} |
| connection = {{ controller.database.engine }}+pymysql://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}_api?charset=utf8{{ connection_x509_ssl_option|string }} |
| |
| # The SQLAlchemy connection string to use to connect to the database. (string |
| # value) |
| #connection=sqlite:////var/lib/nova/nova.sqlite |
| |
| # If True, SQLite uses synchronous mode. (boolean value) |
| #sqlite_synchronous=true |
| |
| # The SQLAlchemy connection string to use to connect to the slave database. |
| # (string value) |
| #slave_connection=<None> |
| |
| # The SQL mode to be used for MySQL sessions. This option, including the |
| # default, overrides any server-set SQL mode. To use whatever SQL mode is set by |
| # the server configuration, set this to no value. Example: mysql_sql_mode= |
| # (string value) |
| #mysql_sql_mode=TRADITIONAL |
| |
| # Timeout before idle SQL connections are reaped. (integer value) |
| #idle_timeout=3600 |
| |
| # Maximum number of SQL connections to keep open in a pool. Setting a value of 0 |
| # indicates no limit. (integer value) |
| #max_pool_size=<None> |
| |
| # Maximum number of database connection retries during startup. Set to -1 to |
| # specify an infinite retry count. (integer value) |
| #max_retries=10 |
| |
| # Interval between retries of opening a SQL connection. (integer value) |
| #retry_interval=10 |
| |
| # If set, use this value for max_overflow with SQLAlchemy. (integer value) |
| #max_overflow=<None> |
| |
| # Verbosity of SQL debugging information: 0=None, 100=Everything. (integer |
| # value) |
| #connection_debug=0 |
| |
| # Add Python stack traces to SQL as comment strings. (boolean value) |
| #connection_trace=false |
| |
| # If set, use this value for pool_timeout with SQLAlchemy. (integer value) |
| #pool_timeout=<None> |
| |
| |
| [barbican] |
| |
| # |
| # From nova.conf |
| # |
| |
| # Use this endpoint to connect to Barbican, for example: |
| # "http://localhost:9311/" (string value) |
| #barbican_endpoint=<None> |
| |
| # Version of the Barbican API, for example: "v1" (string value) |
| #barbican_api_version=<None> |
| |
| # Use this endpoint to connect to Keystone (string value) |
| {%- if controller.get('barbican', {}).get('enabled', False) %} |
| auth_endpoint={{ controller.identity.get('protocol', 'http') }}://{{ controller.identity.get('host', 'localhost') }}:{{ controller.identity.get('port', '5000') }}/v3 |
| {%- if controller.identity.get('protocol', 'http') == 'https' %} |
| cafile={{ controller.identity.get('cacert_file', controller.cacert_file) }} |
| {%- endif %} |
| {%- endif %} |
| |
| # Number of seconds to wait before retrying poll for key creation completion |
| # (integer value) |
| #retry_delay=1 |
| |
| # Number of times to retry poll for key creation completion (integer value) |
| #number_of_retries=60 |
| |
| |
| [cache] |
| |
| # |
| # From nova.conf |
| # |
| {%- if controller.cache is defined %} |
| enabled = true |
| backend = oslo_cache.memcache_pool |
| memcache_servers={%- for member in controller.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %} |
| {%- endif %} |
| # Prefix for building the configuration dictionary for the cache region. This |
| # should not need to be changed unless there is another dogpile.cache region |
| # with the same configuration name. (string value) |
| #config_prefix=cache.oslo |
| |
| # Default TTL, in seconds, for any cached item in the dogpile.cache region. This |
| # applies to any cached method that doesn't have an explicit cache expiration |
| # time defined for it. (integer value) |
| #expiration_time=600 |
| |
| # Dogpile.cache backend module. It is recommended that Memcache or Redis |
| # (dogpile.cache.redis) be used in production deployments. For eventlet-based or |
| # highly threaded servers, Memcache with pooling (oslo_cache.memcache_pool) is |
| # recommended. For low thread servers, dogpile.cache.memcached is recommended. |
| # Test environments with a single instance of the server can use the |
| # dogpile.cache.memory backend. (string value) |
| #backend=dogpile.cache.null |
| |
| # Arguments supplied to the backend module. Specify this option once per |
| # argument to be passed to the dogpile.cache backend. Example format: |
| # "<argname>:<value>". (multi valued) |
| #backend_argument = |
| |
| # Proxy classes to import that will affect the way the dogpile.cache backend |
| # functions. See the dogpile.cache documentation on changing-backend-behavior. |
| # (list value) |
| #proxies = |
| |
| # Global toggle for caching. (boolean value) |
| #enabled=false |
| |
| # Extra debugging from the cache backend (cache keys, get/set/delete/etc calls). |
| # This is only really useful if you need to see the specific cache-backend |
| # get/set/delete calls with the keys/values. Typically this should be left set |
| # to false. (boolean value) |
| #debug_cache_backend=false |
| |
| # Memcache servers in the format of "host:port". (dogpile.cache.memcache and |
| # oslo_cache.memcache_pool backends only). (list value) |
| #memcache_servers=localhost:11211 |
| |
| # Number of seconds memcached server is considered dead before it is tried |
| # again. (dogpile.cache.memcache and oslo_cache.memcache_pool backends only). |
| # (integer value) |
| #memcache_dead_retry=300 |
| |
| # Timeout in seconds for every call to a server. (dogpile.cache.memcache and |
| # oslo_cache.memcache_pool backends only). (integer value) |
| #memcache_socket_timeout=3 |
| |
| # Max total number of open connections to every memcached server. |
| # (oslo_cache.memcache_pool backend only). (integer value) |
| #memcache_pool_maxsize=10 |
| |
| # Number of seconds a connection to memcached is held unused in the pool before |
| # it is closed. (oslo_cache.memcache_pool backend only). (integer value) |
| #memcache_pool_unused_timeout=60 |
| |
| # Number of seconds that an operation will wait to get a memcache client |
| # connection. (integer value) |
| #memcache_pool_connection_get_timeout=10 |
| |
| |
| [cells] |
| # |
| # Cells options allow you to use cells functionality in openstack |
| # deployment. |
| # |
| # Note that the options in this group are only for cells v1 functionality, which |
| # is considered experimental and not recommended for new deployments. Cells v1 |
| # is being replaced with cells v2, which starting in the 15.0.0 Ocata release is |
| # required and all Nova deployments will be at least a cells v2 cell of one. |
| # |
| |
| # |
| # From nova.conf |
| # |
| |
| # DEPRECATED: |
| # Topic. |
| # |
| # This is the message queue topic that cells nodes listen on. It is |
| # used when the cells service is started up to configure the queue, |
| # and whenever an RPC call to the scheduler is made. |
| # |
| # Possible values: |
| # |
| # * cells: This is the recommended and the default value. |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # Configurable RPC topics provide little value and can result in a wide variety |
| # of errors. They should not be used. |
| #topic=cells |
| |
| # |
| # Enable cell v1 functionality. |
| # |
| # Note that cells v1 is considered experimental and not recommended for new |
| # Nova deployments. Cells v1 is being replaced by cells v2 which starting in |
| # the 15.0.0 Ocata release, all Nova deployments are at least a cells v2 cell |
| # of one. Setting this option, or any other options in the [cells] group, is |
| # not required for cells v2. |
| # |
| # When this functionality is enabled, it lets you scale an OpenStack
| # Compute cloud in a more distributed fashion without having to use |
| # complicated technologies like database and message queue clustering. |
| # Cells are configured as a tree. The top-level cell should have a host |
| # that runs a nova-api service, but no nova-compute services. Each |
| # child cell should run all of the typical nova-* services in a regular |
| # Compute cloud except for nova-api. You can think of cells as a normal |
| # Compute deployment in that each cell has its own database server and |
| # message queue broker. |
| # |
| # Related options: |
| # |
| # * name: A unique cell name must be given when this functionality |
| # is enabled. |
| # * cell_type: Cell type should be defined for all cells. |
| # (boolean value) |
| enable=False |
| |
| # |
| # Name of the current cell. |
| # |
| # This value must be unique for each cell. Name of a cell is used as |
| # its id, leaving this option unset or setting the same name for |
| # two or more cells may cause unexpected behaviour. |
| # |
| # Related options: |
| # |
| # * enabled: This option is meaningful only when cells service |
| # is enabled |
| # (string value) |
| #name=nova |
| |
| # |
| # Cell capabilities. |
| # |
| # List of arbitrary key=value pairs defining capabilities of the |
| # current cell to be sent to the parent cells. These capabilities |
| # are intended to be used in cells scheduler filters/weighers. |
| # |
| # Possible values: |
| # |
| # * key=value pairs list for example; |
| # ``hypervisor=xenserver;kvm,os=linux;windows`` |
| # (list value) |
| #capabilities=hypervisor=xenserver;kvm,os=linux;windows |
| |
| # |
| # Call timeout. |
| # |
| # Cell messaging module waits for response(s) to be put into the |
| # eventlet queue. This option defines the seconds waited for |
| # response from a call to a cell. |
| # |
| # Possible values: |
| # |
| # * An integer, corresponding to the interval time in seconds. |
| # (integer value) |
| # Minimum value: 0 |
| #call_timeout=60 |
| |
| # |
| # Reserve percentage |
| # |
| # Percentage of cell capacity to hold in reserve, so the minimum |
| # amount of free resource is considered to be; |
| # |
| # min_free = total * (reserve_percent / 100.0) |
| # |
| # This option affects both memory and disk utilization. |
| # |
| # The primary purpose of this reserve is to ensure some space is |
| # available for users who want to resize their instance to be larger. |
| # Note that currently once the capacity expands into this reserve |
| # space this option is ignored. |
| # |
| # Possible values: |
| # |
| # * An integer or float, corresponding to the percentage of cell capacity to |
| # be held in reserve. |
| # (floating point value) |
| #reserve_percent=10.0 |
| |
| # |
| # Type of cell. |
| # |
| # When cells feature is enabled the hosts in the OpenStack Compute |
| # cloud are partitioned into groups. Cells are configured as a tree. |
| # The top-level cell's cell_type must be set to ``api``. All other |
| # cells are defined as a ``compute cell`` by default. |
| # |
| # Related option: |
| # |
| # * quota_driver: Disable quota checking for the child cells. |
| # (nova.quota.NoopQuotaDriver) |
| # (string value) |
| # Allowed values: api, compute |
| #cell_type=compute |
| |
| # |
| # Mute child interval. |
| # |
| # Number of seconds after which a lack of capability and capacity
| # updates from a child cell causes it to be treated as a mute cell.
| # The child cell is then weighted so that it is highly recommended to
| # be skipped.
| # |
| # Possible values: |
| # |
| # * An integer, corresponding to the interval time in seconds. |
| # (integer value) |
| #mute_child_interval=300 |
| |
| # |
| # Bandwidth update interval. |
| # |
| # Seconds between bandwidth usage cache updates for cells. |
| # |
| # Possible values: |
| # |
| # * An integer, corresponding to the interval time in seconds. |
| # (integer value) |
| #bandwidth_update_interval=600 |
| |
| # |
| # Instance update sync database limit. |
| # |
| # Number of instances to pull from the database at one time for |
| # a sync. If there are more instances to update the results will |
| # be paged through. |
| # |
| # Possible values: |
| # |
| # * An integer, corresponding to a number of instances. |
| # (integer value) |
| #instance_update_sync_database_limit=100 |
| |
| # |
| # Mute weight multiplier. |
| # |
| # Multiplier used to weigh mute children. Mute children cells are |
| # recommended to be skipped so their weight is multiplied by this |
| # negative value. |
| # |
| # Possible values: |
| # |
| # * Negative numeric number |
| # (floating point value) |
| #mute_weight_multiplier=-10000.0 |
| |
| # |
| # Ram weight multiplier. |
| # |
| # Multiplier used for weighing ram. Negative numbers indicate that |
| # Compute should stack VMs on one host instead of spreading out new |
| # VMs to more hosts in the cell. |
| # |
| # Possible values: |
| # |
| # * Numeric multiplier |
| # (floating point value) |
| #ram_weight_multiplier=10.0 |
| |
| # |
| # Offset weight multiplier |
| # |
| # Multiplier used to weigh offset weigher. Cells with higher |
| # weight_offsets in the DB will be preferred. The weight_offset |
| # is a property of a cell stored in the database. It can be used |
| # by a deployer to have scheduling decisions favor or disfavor |
| # cells based on the setting. |
| # |
| # Possible values: |
| # |
| # * Numeric multiplier |
| # (floating point value) |
| #offset_weight_multiplier=1.0 |
| |
| # |
| # Instance updated at threshold |
| # |
| # Number of seconds after an instance was updated or deleted to
| # continue to update cells. This option lets the cells manager only
| # attempt to sync instances that have been updated recently.
| # i.e., a threshold of 3600 means to only update instances that
| # have been modified in the last hour.
| # |
| # Possible values: |
| # |
| # * Threshold in seconds |
| # |
| # Related options: |
| # |
| # * This value is used with the ``instance_update_num_instances`` |
| # value in a periodic task run. |
| # (integer value) |
| #instance_updated_at_threshold=3600 |
| |
| # |
| # Instance update num instances |
| # |
| # On every run of the periodic task, the nova cells manager will attempt
| # to sync instance_update_num_instances instances. When the
| # manager gets the list of instances, it shuffles them so that multiple |
| # nova-cells services do not attempt to sync the same instances in |
| # lockstep. |
| # |
| # Possible values: |
| # |
| # * Positive integer number |
| # |
| # Related options: |
| # |
| # * This value is used with the ``instance_updated_at_threshold`` |
| # value in a periodic task run. |
| # (integer value) |
| #instance_update_num_instances=1 |
| |
| # |
| # Maximum hop count |
| # |
| # When processing a targeted message, if the local cell is not the |
| # target, a route is defined between neighbouring cells. And the |
| # message is processed across the whole routing path. This option |
| # defines the maximum hop counts until reaching the target. |
| # |
| # Possible values: |
| # |
| # * Positive integer value |
| # (integer value) |
| #max_hop_count=10 |
| |
| # |
| # Cells scheduler. |
| # |
| # The class of the driver used by the cells scheduler. This should be |
| # the full Python path to the class to be used. If nothing is specified |
| # in this option, the CellsScheduler is used. |
| # (string value) |
| #scheduler=nova.cells.scheduler.CellsScheduler |
| |
| # |
| # RPC driver queue base. |
| # |
| # When sending a message to another cell by JSON-ifying the message |
| # and making an RPC cast to 'process_message', a base queue is used. |
| # This option defines the base queue name to be used when communicating |
| # between cells. Various topics by message type will be appended to this. |
| # |
| # Possible values: |
| # |
| # * The base queue name to be used when communicating between cells. |
| # (string value) |
| #rpc_driver_queue_base=cells.intercell |
| |
| # |
| # Scheduler filter classes. |
| # |
| # Filter classes the cells scheduler should use. An entry of |
| # "nova.cells.filters.all_filters" maps to all cells filters |
| # included with nova. As of the Mitaka release the following |
| # filter classes are available: |
| # |
| # Different cell filter: A scheduler hint of 'different_cell' |
| # with a value of a full cell name may be specified to route |
| # a build away from a particular cell. |
| # |
| # Image properties filter: Image metadata named |
| # 'hypervisor_version_requires' with a version specification |
| # may be specified to ensure the build goes to a cell which |
| # has hypervisors of the required version. If either the version |
| # requirement on the image or the hypervisor capability of the |
| # cell is not present, this filter returns without filtering out |
| # the cells. |
| # |
| # Target cell filter: A scheduler hint of 'target_cell' with a |
| # value of a full cell name may be specified to route a build to |
| # a particular cell. No error handling is done as there's no way
| # to know whether the full path is valid.
| # |
| # As an admin user, you can also add a filter that directs builds |
| # to a particular cell. |
| # |
| # (list value) |
| #scheduler_filter_classes=nova.cells.filters.all_filters |
| |
| # |
| # Scheduler weight classes. |
| # |
| # Weigher classes the cells scheduler should use. An entry of |
| # "nova.cells.weights.all_weighers" maps to all cell weighers |
| # included with nova. As of the Mitaka release the following |
| # weight classes are available: |
| # |
| # mute_child: Downgrades the likelihood of child cells being |
| # chosen for scheduling requests, which haven't sent capacity |
| # or capability updates in a while. Options include |
| # mute_weight_multiplier (multiplier for mute children; value |
| # should be negative). |
| # |
| # ram_by_instance_type: Select cells with the most RAM capacity |
| # for the instance type being requested. Because higher weights |
| # win, Compute returns the number of available units for the |
| # instance type requested. The ram_weight_multiplier option defaults |
| # to 10.0 that adds to the weight by a factor of 10. Use a negative |
| # number to stack VMs on one host instead of spreading out new VMs |
| # to more hosts in the cell. |
| # |
| # weight_offset: Allows modifying the database to weight a particular |
| # cell. The highest weight will be the first cell to be scheduled for |
| # launching an instance. When the weight_offset of a cell is set to 0, |
| # it is unlikely to be picked but it could be picked if other cells |
| # have a lower weight, like if they're full. And when the weight_offset |
| # is set to a very high value (for example, '999999999999999'), it is |
| # likely to be picked if another cell does not have a higher weight.
| # (list value) |
| #scheduler_weight_classes=nova.cells.weights.all_weighers |
| |
| # |
| # Scheduler retries. |
| # |
| # How many retries when no cells are available. Specifies how many |
| # times the scheduler tries to launch a new instance when no cells |
| # are available. |
| # |
| # Possible values: |
| # |
| # * Positive integer value |
| # |
| # Related options: |
| # |
| # * This value is used with the ``scheduler_retry_delay`` value |
| # while retrying to find a suitable cell. |
| # (integer value) |
| #scheduler_retries=10 |
| |
| # |
| # Scheduler retry delay. |
| # |
| # Specifies the delay (in seconds) between scheduling retries when no |
| # cell can be found to place the new instance on. When the instance |
| # could not be scheduled to a cell after ``scheduler_retries`` in |
| # combination with ``scheduler_retry_delay``, then the scheduling |
| # of the instance failed. |
| # |
| # Possible values: |
| # |
| # * Time in seconds. |
| # |
| # Related options: |
| # |
| # * This value is used with the ``scheduler_retries`` value |
| # while retrying to find a suitable cell. |
| # (integer value) |
| #scheduler_retry_delay=2 |
| |
| # |
| # DB check interval. |
| # |
| # Cell state manager updates cell status for all cells from the DB |
| # only after this particular interval time is passed. Otherwise cached |
| # statuses are used. If this value is 0 or negative all cell statuses
| # are updated from the DB whenever a state is needed.
| # |
| # Possible values: |
| # |
| # * Interval time, in seconds. |
| # |
| # (integer value) |
| #db_check_interval=60 |
| |
| # |
| # Optional cells configuration. |
| # |
| # Configuration file from which to read cells configuration. If given, |
| # overrides reading cells from the database. |
| # |
| # Cells store all inter-cell communication data, including user names |
| # and passwords, in the database. Because the cells data is not updated |
| # very frequently, use this option to specify a JSON file to store |
| # cells data. With this configuration, the database is no longer |
| # consulted when reloading the cells data. The file must have columns |
| # present in the Cell model (excluding common database fields and the |
| # id column). You must specify the queue connection information through |
| # a transport_url field, instead of username, password, and so on. |
| # |
| # The transport_url has the following form: |
| # rabbit://USERNAME:PASSWORD@HOSTNAME:PORT/VIRTUAL_HOST |
| # |
| # Possible values: |
| # |
| # The scheme can be either qpid or rabbit, the following sample shows |
| # this optional configuration: |
| # |
| # { |
| # "parent": { |
| # "name": "parent", |
| # "api_url": "http://api.example.com:8774", |
| # "transport_url": "rabbit://rabbit.example.com", |
| # "weight_offset": 0.0, |
| # "weight_scale": 1.0, |
| # "is_parent": true |
| # }, |
| # "cell1": { |
| # "name": "cell1", |
| # "api_url": "http://api.example.com:8774", |
| # "transport_url": "rabbit://rabbit1.example.com", |
| # "weight_offset": 0.0, |
| # "weight_scale": 1.0, |
| # "is_parent": false |
| # }, |
| # "cell2": { |
| # "name": "cell2", |
| # "api_url": "http://api.example.com:8774", |
| # "transport_url": "rabbit://rabbit2.example.com", |
| # "weight_offset": 0.0, |
| # "weight_scale": 1.0, |
| # "is_parent": false |
| # } |
| # } |
| # |
| # (string value) |
| #cells_config=<None> |
| |
| |
| [cinder] |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Info to match when looking for cinder in the service catalog. |
| # |
| # Possible values: |
| # |
| # * Format is separated values of the form: |
| # <service_type>:<service_name>:<endpoint_type> |
| # |
| # Note: Nova does not support the Cinder v1 API since the Nova 15.0.0 Ocata |
| # release. |
| # |
| # Related options: |
| # |
| # * endpoint_template - Setting this option will override catalog_info |
| # (string value) |
| #catalog_info=volumev2:cinderv2:publicURL |
| catalog_info=volumev2:cinderv2:internalURL |
| {%- if controller.glance.get('protocol', 'http') == 'https' %} |
| cafile={{ controller.identity.get('cacert_file', controller.cacert_file) }} |
| {%- endif %} |
| # |
| # If this option is set then it will override service catalog lookup with |
| # this template for cinder endpoint |
| # |
| # Possible values: |
| # |
| # * URL for cinder endpoint API |
| # e.g. http://localhost:8776/v2/%(project_id)s |
| # |
| # Note: Nova does not support the Cinder v1 API since the Nova 15.0.0 Ocata |
| # release. |
| # |
| # Related options: |
| # |
| # * catalog_info - If endpoint_template is not set, catalog_info will be used. |
| # (string value) |
| #endpoint_template=<None> |
| |
| # |
| # Region name of this node. This is used when picking the URL in the service |
| # catalog. |
| # |
| # Possible values: |
| # |
| # * Any string representing region name |
| # (string value) |
| #os_region_name=<None> |
| os_region_name = {{ controller.identity.region }} |
| |
| # |
| # Number of times cinderclient should retry on any failed http call. |
| # 0 means connection is attempted only once. Setting it to any positive integer |
| # means that on failure connection is retried that many times e.g. setting it |
| # to 3 means total attempts to connect will be 4. |
| # |
| # Possible values: |
| # |
| # * Any integer value. 0 means connection is attempted only once |
| # (integer value) |
| # Minimum value: 0 |
| #http_retries=3 |
| |
| # |
| # Allow attach between instance and volume in different availability zones. |
| # |
| # If False, volumes attached to an instance must be in the same availability |
| # zone in Cinder as the instance availability zone in Nova. |
| # This also means care should be taken when booting an instance from a volume |
| # where source is not "volume" because Nova will attempt to create a volume |
| # using |
| # the same availability zone as what is assigned to the instance. |
| # If that AZ is not in Cinder (or allow_availability_zone_fallback=False in |
| # cinder.conf), the volume create request will fail and the instance will fail |
| # the build request. |
| # By default there is no availability zone restriction on volume attach. |
| # (boolean value) |
| #cross_az_attach=true |
| {%- if controller.cross_az_attach is defined %} |
| cross_az_attach={{ controller.cross_az_attach }} |
| {%- endif %} |
| |
| [cloudpipe] |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Image ID used when starting up a cloudpipe VPN client. |
| # |
| # An empty instance is created and configured with OpenVPN using |
| # boot_script_template. This instance would be snapshotted and stored |
| # in glance. ID of the stored image is used in 'vpn_image_id' to |
| # create cloudpipe VPN client. |
| # |
| # Possible values: |
| # |
| # * Any valid ID of a VPN image |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/vpn_image_id |
| #vpn_image_id=0 |
| |
| # |
| # Flavor for VPN instances. |
| # |
| # Possible values: |
| # |
| # * Any valid flavor name |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/vpn_flavor |
| #vpn_flavor=m1.tiny |
| |
| # |
| # Template for cloudpipe instance boot script. |
| # |
| # Possible values: |
| # |
| # * Any valid path to a cloudpipe instance boot script template |
| # |
| # Related options: |
| # |
| # The following options are required to configure cloudpipe-managed |
| # OpenVPN server. |
| # |
| # * dmz_net |
| # * dmz_mask |
| # * cnt_vpn_clients |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/boot_script_template |
| #boot_script_template=$pybasedir/nova/cloudpipe/bootscript.template |
| |
| # |
| # Network to push into OpenVPN config. |
| # |
| # Note: Above mentioned OpenVPN config can be found at |
| # /etc/openvpn/server.conf. |
| # |
| # Possible values: |
| # |
| # * Any valid IPv4/IPV6 address |
| # |
| # Related options: |
| # |
| # * boot_script_template - dmz_net is pushed into bootscript.template |
| # to configure cloudpipe-managed OpenVPN server |
| # (IP address value) |
| # Deprecated group/name - [DEFAULT]/dmz_net |
| #dmz_net=10.0.0.0 |
| |
| # |
| # Netmask to push into OpenVPN config. |
| # |
| # Possible values: |
| # |
| # * Any valid IPv4/IPV6 netmask |
| # |
| # Related options: |
| # |
| # * dmz_net - dmz_net and dmz_mask is pushed into bootscript.template |
| # to configure cloudpipe-managed OpenVPN server |
| # * boot_script_template |
| # (IP address value) |
| # Deprecated group/name - [DEFAULT]/dmz_mask |
| #dmz_mask=255.255.255.0 |
| |
| # |
| # Suffix to add to project name for VPN key and secgroups |
| # |
| # Possible values: |
| # |
| # * Any string value representing the VPN key suffix |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/vpn_key_suffix |
| #vpn_key_suffix=-vpn |
| |
| |
| [conductor] |
| # |
| # Options under this group are used to define Conductor's communication, |
| # which manager should act as a proxy between computes and the database,
| # and finally, how many worker processes will be used. |
| |
| # |
| # From nova.conf |
| # |
| |
| # DEPRECATED: |
| # Topic exchange name on which conductor nodes listen. |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # There is no need to let users choose the RPC topic for all services - there |
| # is little gain from this. Furthermore, it makes it really easy to break Nova |
| # by using this option. |
| #topic=conductor |
| |
| # |
| # Number of workers for OpenStack Conductor service. The default will be the |
| # number of CPUs available. |
| # (integer value) |
| #workers=<None> |
| workers = {{ controller.get('conductor', {}).get('workers', controller.workers) }} |
| |
| [console] |
| # |
| # Options under this group allow to tune the configuration of the console proxy |
| # service. |
| # |
| # Note: in configuration of every compute is a ``console_host`` option, |
| # which allows to select the console proxy service to connect to. |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Adds list of allowed origins to the console websocket proxy to allow |
| # connections from other origin hostnames. |
| # Websocket proxy matches the host header with the origin header to |
| # prevent cross-site requests. This list specifies any values, other
| # than the host, that are allowed in the origin header.
| # |
| # Possible values: |
| # |
| # * A list where each element is an allowed origin hostname, else an empty list
| # (list value) |
| # Deprecated group/name - [DEFAULT]/console_allowed_origins |
| #allowed_origins = |
| |
| |
| [consoleauth] |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # The lifetime of a console auth token. |
| # |
| # A console auth token is used in authorizing console access for a user. |
| # Once the auth token time to live count has elapsed, the token is |
| # considered expired. Expired tokens are then deleted. |
| # (integer value) |
| # Minimum value: 0 |
| # Deprecated group/name - [DEFAULT]/console_token_ttl |
| #token_ttl=600 |
| {% if controller.consoleauth_token_ttl is defined %} |
| {%- set token_ttl = controller.consoleauth_token_ttl %} |
| token_ttl = {{ token_ttl }} |
| {%- elif controller.get('consoleauth', {}).token_ttl is defined %} |
| token_ttl = {{ controller.consoleauth.token_ttl }} |
| {% endif %} |
| |
| [cors] |
| |
| # |
| # From oslo.middleware |
| # |
| |
| # Indicate whether this resource may be shared with the domain received in the |
| # requests "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing |
| # slash. Example: https://horizon.example.com (list value) |
| #allowed_origin=<None> |
| {% if controller.cors.allowed_origin is defined %} |
| allowed_origin = {{ controller.cors.allowed_origin }} |
| {% endif %} |
| |
| # Indicate that the actual request can include user credentials (boolean value) |
| #allow_credentials=true |
| {% if controller.cors.allow_credentials is defined %} |
| allow_credentials = {{ controller.cors.allow_credentials }} |
| {% endif %} |
| |
| # Indicate which headers are safe to expose to the API. Defaults to HTTP Simple |
| # Headers. (list value) |
| #expose_headers=X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,X-Service-Token |
| {% if controller.cors.expose_headers is defined %} |
| expose_headers = {{ controller.cors.expose_headers }} |
| {% endif %} |
| |
| # Maximum cache age of CORS preflight requests. (integer value) |
| #max_age=3600 |
| {% if controller.cors.max_age is defined %} |
| max_age = {{ controller.cors.max_age }} |
| {% endif %} |
| |
| # Indicate which methods can be used during the actual request. (list value) |
| #allow_methods=GET,PUT,POST,DELETE,PATCH |
| {% if controller.cors.allow_methods is defined %} |
| allow_methods = {{ controller.cors.allow_methods }} |
| {% endif %} |
| |
| # Indicate which header field names may be used during the actual request. (list |
| # value) |
| #allow_headers=X-Auth-Token,X-Openstack-Request-Id,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id |
| {% if controller.cors.allow_headers is defined %} |
| allow_headers = {{ controller.cors.allow_headers }} |
| {% endif %} |
| |
| [cors.subdomain] |
| |
| # |
| # From oslo.middleware |
| # |
| |
| # Indicate whether this resource may be shared with the domain received in the |
| # requests "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing |
| # slash. Example: https://horizon.example.com (list value) |
| #allowed_origin=<None> |
| |
| # Indicate that the actual request can include user credentials (boolean value) |
| #allow_credentials=true |
| |
| # Indicate which headers are safe to expose to the API. Defaults to HTTP Simple |
| # Headers. (list value) |
| #expose_headers=X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,X-Service-Token |
| |
| # Maximum cache age of CORS preflight requests. (integer value) |
| #max_age=3600 |
| |
| # Indicate which methods can be used during the actual request. (list value) |
| #allow_methods=GET,PUT,POST,DELETE,PATCH |
| |
| # Indicate which header field names may be used during the actual request. (list |
| # value) |
| #allow_headers=X-Auth-Token,X-Openstack-Request-Id,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id |
| |
| |
| [crypto] |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Filename of root CA (Certificate Authority). This is a container format |
| # and includes root certificates. |
| # |
| # Possible values: |
| # |
| # * Any file name containing root CA, cacert.pem is default |
| # |
| # Related options: |
| # |
| # * ca_path |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/ca_file |
| #ca_file=cacert.pem |
| |
| # |
| # Filename of a private key. |
| # |
| # Related options: |
| # |
| # * keys_path |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/key_file |
| #key_file=private/cakey.pem |
| |
| # |
| # Filename of root Certificate Revocation List (CRL). This is a list of |
| # certificates that have been revoked, and therefore, entities presenting |
| # those (revoked) certificates should no longer be trusted. |
| # |
| # Related options: |
| # |
| # * ca_path |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/crl_file |
| #crl_file=crl.pem |
| |
| # |
| # Directory path where keys are located. |
| # |
| # Related options: |
| # |
| # * key_file |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/keys_path |
| #keys_path=$state_path/keys |
| |
| # |
| # Directory path where root CA is located. |
| # |
| # Related options: |
| # |
| # * ca_file |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/ca_path |
| #ca_path=$state_path/CA |
| |
| # Option to enable/disable use of CA for each project. (boolean value) |
| # Deprecated group/name - [DEFAULT]/use_project_ca |
| #use_project_ca=false |
| |
| # |
| # Subject for certificate for users, %s for |
| # project, user, timestamp |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/user_cert_subject |
| #user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s |
| |
| # |
| # Subject for certificate for projects, %s for |
| # project, timestamp |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/project_cert_subject |
| #project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s |
| |
| |
| [database] |
| |
| # |
| # From oslo.db |
| # |
| |
| # DEPRECATED: The file name to use with SQLite. (string value) |
| # Deprecated group/name - [DEFAULT]/sqlite_db |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| # Reason: Should use config option connection or slave_connection to connect the |
| # database. |
| #sqlite_db=oslo.sqlite |
| idle_timeout = {{ controller.database.get('idle_timeout', 180) }} |
| min_pool_size = {{ controller.database.get('min_pool_size', 100) }} |
| max_pool_size = {{ controller.database.get('max_pool_size', 700) }} |
| max_overflow = {{ controller.database.get('max_overflow', 100) }} |
| retry_interval = {{ controller.database.get('retry_interval', 5) }} |
| max_retries = {{ controller.database.get('max_retries', '-1') }} |
| db_max_retries = {{ controller.database.get('db_max_retries', 3) }} |
| db_retry_interval = {{ controller.database.get('db_retry_interval', 1) }} |
| connection_debug = {{ controller.database.get('connection_debug', 10) }} |
| pool_timeout = {{ controller.database.get('pool_timeout', 120) }} |
| connection = {{ controller.database.engine }}+pymysql://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}?charset=utf8{{ connection_x509_ssl_option|string }} |
| |
| # If True, SQLite uses synchronous mode. (boolean value) |
| # Deprecated group/name - [DEFAULT]/sqlite_synchronous |
| #sqlite_synchronous=true |
| |
| # The back end to use for the database. (string value) |
| # Deprecated group/name - [DEFAULT]/db_backend |
| #backend=sqlalchemy |
| |
| # The SQLAlchemy connection string to use to connect to the database. (string |
| # value) |
| # Deprecated group/name - [DEFAULT]/sql_connection |
| # Deprecated group/name - [DATABASE]/sql_connection |
| # Deprecated group/name - [sql]/connection |
| #connection=<None> |
| |
| # The SQLAlchemy connection string to use to connect to the slave database. |
| # (string value) |
| #slave_connection=<None> |
| |
| # The SQL mode to be used for MySQL sessions. This option, including the |
| # default, overrides any server-set SQL mode. To use whatever SQL mode is set by |
| # the server configuration, set this to no value. Example: mysql_sql_mode= |
| # (string value) |
| #mysql_sql_mode=TRADITIONAL |
| |
| # Timeout before idle SQL connections are reaped. (integer value) |
| # Deprecated group/name - [DEFAULT]/sql_idle_timeout |
| # Deprecated group/name - [DATABASE]/sql_idle_timeout |
| # Deprecated group/name - [sql]/idle_timeout |
| #idle_timeout=3600 |
| |
| # Minimum number of SQL connections to keep open in a pool. (integer value) |
| # Deprecated group/name - [DEFAULT]/sql_min_pool_size |
| # Deprecated group/name - [DATABASE]/sql_min_pool_size |
| #min_pool_size=1 |
| |
| # Maximum number of SQL connections to keep open in a pool. Setting a value of 0 |
| # indicates no limit. (integer value) |
| # Deprecated group/name - [DEFAULT]/sql_max_pool_size |
| # Deprecated group/name - [DATABASE]/sql_max_pool_size |
| #max_pool_size=5 |
| |
| # Maximum number of database connection retries during startup. Set to -1 to |
| # specify an infinite retry count. (integer value) |
| # Deprecated group/name - [DEFAULT]/sql_max_retries |
| # Deprecated group/name - [DATABASE]/sql_max_retries |
| #max_retries=10 |
| |
| # Interval between retries of opening a SQL connection. (integer value) |
| # Deprecated group/name - [DEFAULT]/sql_retry_interval |
| # Deprecated group/name - [DATABASE]/reconnect_interval |
| #retry_interval=10 |
| |
| # If set, use this value for max_overflow with SQLAlchemy. (integer value) |
| # Deprecated group/name - [DEFAULT]/sql_max_overflow |
| # Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow |
| #max_overflow=50 |
| |
| # Verbosity of SQL debugging information: 0=None, 100=Everything. (integer |
| # value) |
| # Minimum value: 0 |
| # Maximum value: 100 |
| # Deprecated group/name - [DEFAULT]/sql_connection_debug |
| #connection_debug=0 |
| |
| # Add Python stack traces to SQL as comment strings. (boolean value) |
| # Deprecated group/name - [DEFAULT]/sql_connection_trace |
| #connection_trace=false |
| |
| # If set, use this value for pool_timeout with SQLAlchemy. (integer value) |
| # Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout |
| #pool_timeout=<None> |
| |
| # Enable the experimental use of database reconnect on connection lost. (boolean |
| # value) |
| #use_db_reconnect=false |
| |
| # Seconds between retries of a database transaction. (integer value) |
| #db_retry_interval=1 |
| |
| # If True, increases the interval between retries of a database operation up to |
| # db_max_retry_interval. (boolean value) |
| #db_inc_retry_interval=true |
| |
| # If db_inc_retry_interval is set, the maximum seconds between retries of a |
| # database operation. (integer value) |
| #db_max_retry_interval=10 |
| |
| # Maximum retries in case of connection error or deadlock error before error is |
| # raised. Set to -1 to specify an infinite retry count. (integer value) |
| #db_max_retries=20 |
| |
| # |
| # From oslo.db.concurrency |
| # |
| |
| # Enable the experimental use of thread pooling for all DB API calls (boolean |
| # value) |
| # Deprecated group/name - [DEFAULT]/dbapi_use_tpool |
| #use_tpool=false |
| |
| |
| [ephemeral_storage_encryption] |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Enables/disables LVM ephemeral storage encryption. |
| # (boolean value) |
| #enabled=false |
| |
| # |
| # Cipher-mode string to be used. |
| # |
| # The cipher and mode to be used to encrypt ephemeral storage. The set of |
| # cipher-mode combinations available depends on kernel support. |
| # |
| # Possible values: |
| # |
| # * Any crypto option listed in ``/proc/crypto``. |
| # (string value) |
| #cipher=aes-xts-plain64 |
| |
| # |
| # Encryption key length in bits. |
| # |
| # The bit length of the encryption key to be used to encrypt ephemeral storage. |
| # In XTS mode only half of the bits are used for encryption key. |
| # (integer value) |
| # Minimum value: 1 |
| #key_size=512 |
| |
| |
| [filter_scheduler] |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Size of subset of best hosts selected by scheduler. |
| # |
| # New instances will be scheduled on a host chosen randomly from a subset of the |
| # N best hosts, where N is the value set by this option. |
| # |
| # Setting this to a value greater than 1 will reduce the chance that multiple |
| # scheduler processes handling similar requests will select the same host, |
| # creating a potential race condition. By selecting a host randomly from the N |
| # hosts that best fit the request, the chance of a conflict is reduced. However, |
| # the higher you set this value, the less optimal the chosen host may be for a |
| # given request. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. |
| # |
| # Possible values: |
| # |
| # * An integer, where the integer corresponds to the size of a host subset. Any |
| # integer is valid, although any value less than 1 will be treated as 1 |
| # (integer value) |
| # Minimum value: 1 |
| # Deprecated group/name - [DEFAULT]/scheduler_host_subset_size |
| #host_subset_size=1 |
| host_subset_size=30 |
| |
| # |
| # The number of instances that can be actively performing IO on a host. |
| # |
| # Instances performing IO includes those in the following states: build, resize, |
| # snapshot, migrate, rescue, unshelve. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. Also note that this setting |
| # only affects scheduling if the 'io_ops_filter' filter is enabled. |
| # |
| # Possible values: |
| # |
| # * An integer, where the integer corresponds to the max number of instances |
| # that can be actively performing IO on any given host. |
| # (integer value) |
| # Deprecated group/name - [DEFAULT]/max_io_ops_per_host |
| #max_io_ops_per_host=8 |
| max_io_ops_per_host=8 |
| |
| # |
| # Maximum number of instances that can be active on a host. |
| # |
| # If you need to limit the number of instances on any given host, set this |
| # option |
| # to the maximum number of instances you want to allow. The num_instances_filter |
| # will reject any host that has at least as many instances as this option's |
| # value. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. Also note that this setting |
| # only affects scheduling if the 'num_instances_filter' filter is enabled. |
| # |
| # Possible values: |
| # |
| # * An integer, where the integer corresponds to the max instances that can be |
| # scheduled on a host. |
| # (integer value) |
| # Deprecated group/name - [DEFAULT]/max_instances_per_host |
| #max_instances_per_host=50 |
| max_instances_per_host=50 |
| |
| # |
| # Enable querying of individual hosts for instance information. |
| # |
| # The scheduler may need information about the instances on a host in order to |
| # evaluate its filters and weighers. The most common need for this information |
| # is |
| # for the (anti-)affinity filters, which need to choose a host based on the |
| # instances already running on a host. |
| # |
| # If the configured filters and weighers do not need this information, disabling |
| # this option will improve performance. It may also be disabled when the |
| # tracking |
| # overhead proves too heavy, although this will cause classes requiring host |
| # usage data to query the database on each request instead. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. |
| # (boolean value) |
| # Deprecated group/name - [DEFAULT]/scheduler_tracks_instance_changes |
| #track_instance_changes=true |
| |
| # |
| # Filters that the scheduler can use. |
| # |
| # An unordered list of the filter classes the nova scheduler may apply. Only |
| # the |
| # filters specified in the 'scheduler_enabled_filters' option will be used, but |
| # any filter appearing in that option must also be included in this list. |
| # |
| # By default, this is set to all filters that are included with nova. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. |
| # |
| # Possible values: |
| # |
| # * A list of zero or more strings, where each string corresponds to the name of |
| # a filter that may be used for selecting a host |
| # |
| # Related options: |
| # |
| # * scheduler_enabled_filters |
| # (multi valued) |
| # Deprecated group/name - [DEFAULT]/scheduler_available_filters |
| #available_filters=nova.scheduler.filters.all_filters |
| available_filters=nova.scheduler.filters.all_filters |
| available_filters=nova.scheduler.filters.pci_passthrough_filter.PciPassthroughFilter |
| {% for filter in controller.get('scheduler_custom_filters', []) %} |
| available_filters = {{ filter }} |
| {% endfor %} |
| |
| # |
| # Filters that the scheduler will use. |
| # |
| # An ordered list of filter class names that will be used for filtering |
| # hosts. Ignore the word 'default' in the name of this option: these filters |
| # will |
| # *always* be applied, and they will be applied in the order they are listed so |
| # place your most restrictive filters first to make the filtering process more |
| # efficient. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. |
| # |
| # Possible values: |
| # |
| # * A list of zero or more strings, where each string corresponds to the name of |
| # a filter to be used for selecting a host |
| # |
| # Related options: |
| # |
| # * All of the filters in this option *must* be present in the |
| # 'scheduler_available_filters' option, or a SchedulerHostFilterNotFound |
| # exception will be raised. |
| # (list value) |
| # Deprecated group/name - [DEFAULT]/scheduler_default_filters |
| #enabled_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter |
| enabled_filters={{ controller.scheduler_default_filters }} |
| |
| # |
| # Filters used for filtering baremetal hosts. |
| # |
| # Filters are applied in order, so place your most restrictive filters first to |
| # make the filtering process more efficient. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. |
| # |
| # Possible values: |
| # |
| # * A list of zero or more strings, where each string corresponds to the name of |
| # a filter to be used for selecting a baremetal host |
| # |
| # Related options: |
| # |
| # * If the 'scheduler_use_baremetal_filters' option is False, this option has |
| # no effect. |
| # (list value) |
| # Deprecated group/name - [DEFAULT]/baremetal_scheduler_default_filters |
| #baremetal_enabled_filters=RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ExactRamFilter,ExactDiskFilter,ExactCoreFilter |
| |
| # |
| # Enable baremetal filters. |
| # |
| # Set this to True to tell the nova scheduler that it should use the filters |
| # specified in the 'baremetal_scheduler_enabled_filters' option. If you are not |
| # scheduling baremetal nodes, leave this at the default setting of False. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. |
| # |
| # Related options: |
| # |
| # * If this option is set to True, then the filters specified in the |
| # 'baremetal_scheduler_enabled_filters' are used instead of the filters |
| # specified in 'scheduler_enabled_filters'. |
| # (boolean value) |
| # Deprecated group/name - [DEFAULT]/scheduler_use_baremetal_filters |
| #use_baremetal_filters=false |
| use_baremetal_filters=false |
| |
| # |
| # Weighers that the scheduler will use. |
| # |
| # Only hosts which pass the filters are weighed. The weight for any host starts |
| # at 0, and the weighers order these hosts by adding to or subtracting from the |
| # weight assigned by the previous weigher. Weights may become negative. An |
| # instance will be scheduled to one of the N most-weighted hosts, where N is |
| # 'scheduler_host_subset_size'. |
| # |
| # By default, this is set to all weighers that are included with Nova. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. |
| # |
| # Possible values: |
| # |
| # * A list of zero or more strings, where each string corresponds to the name of |
| # a weigher that will be used for selecting a host |
| # (list value) |
| # Deprecated group/name - [DEFAULT]/scheduler_weight_classes |
| #weight_classes=nova.scheduler.weights.all_weighers |
| |
| # |
| # Ram weight multiplier ratio. |
| # |
| # This option determines how hosts with more or less available RAM are weighed. |
| # A |
| # positive value will result in the scheduler preferring hosts with more |
| # available RAM, and a negative number will result in the scheduler preferring |
| # hosts with less available RAM. Another way to look at it is that positive |
| # values for this option will tend to spread instances across many hosts, while |
| # negative values will tend to fill up (stack) hosts as much as possible before |
| # scheduling to a less-used host. The absolute value, whether positive or |
| # negative, controls how strong the RAM weigher is relative to other weighers. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. Also note that this setting |
| # only affects scheduling if the 'ram' weigher is enabled. |
| # |
| # Possible values: |
| # |
| # * An integer or float value, where the value corresponds to the multiplier |
| # ratio for this weigher. |
| # (floating point value) |
| # Deprecated group/name - [DEFAULT]/ram_weight_multiplier |
| #ram_weight_multiplier=1.0 |
| |
| # |
| # Disk weight multiplier ratio. |
| # |
| # Multiplier used for weighing free disk space. Negative numbers mean to |
| # stack vs spread. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. Also note that this setting |
| # only affects scheduling if the 'disk' weigher is enabled. |
| # |
| # Possible values: |
| # |
| # * An integer or float value, where the value corresponds to the multiplier |
| # ratio for this weigher. |
| # (floating point value) |
| # Deprecated group/name - [DEFAULT]/disk_weight_multiplier |
| #disk_weight_multiplier=1.0 |
| |
| # |
| # IO operations weight multiplier ratio. |
| # |
| # This option determines how hosts with differing workloads are weighed. |
| # Negative |
| # values, such as the default, will result in the scheduler preferring hosts |
| # with |
| # lighter workloads whereas positive values will prefer hosts with heavier |
| # workloads. Another way to look at it is that positive values for this option |
| # will tend to schedule instances onto hosts that are already busy, while |
| # negative values will tend to distribute the workload across more hosts. The |
| # absolute value, whether positive or negative, controls how strong the io_ops |
| # weigher is relative to other weighers. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. Also note that this setting |
| # only affects scheduling if the 'io_ops' weigher is enabled. |
| # |
| # Possible values: |
| # |
| # * An integer or float value, where the value corresponds to the multiplier |
| # ratio for this weigher. |
| # (floating point value) |
| # Deprecated group/name - [DEFAULT]/io_ops_weight_multiplier |
| #io_ops_weight_multiplier=-1.0 |
| |
| # |
| # Multiplier used for weighing hosts for group soft-affinity. |
| # |
| # Possible values: |
| # |
| # * An integer or float value, where the value corresponds to weight multiplier |
| # for hosts with group soft affinity. Only positive values are meaningful, as |
| # negative values would make this behave as a soft anti-affinity weigher. |
| # (floating point value) |
| # Deprecated group/name - [DEFAULT]/soft_affinity_weight_multiplier |
| #soft_affinity_weight_multiplier=1.0 |
| |
| # |
| # Multiplier used for weighing hosts for group soft-anti-affinity. |
| # |
| # Possible values: |
| # |
| # * An integer or float value, where the value corresponds to weight multiplier |
| # for hosts with group soft anti-affinity. Only positive values are |
| # meaningful, as negative values would make this behave as a soft affinity |
| # weigher. |
| # (floating point value) |
| # Deprecated group/name - [DEFAULT]/soft_anti_affinity_weight_multiplier |
| #soft_anti_affinity_weight_multiplier=1.0 |
| |
| # |
| # List of UUIDs for images that can only be run on certain hosts. |
| # |
| # If there is a need to restrict some images to only run on certain designated |
| # hosts, list those image UUIDs here. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. Also note that this setting |
| # only affects scheduling if the 'IsolatedHostsFilter' filter is enabled. |
| # |
| # Possible values: |
| # |
| # * A list of UUID strings, where each string corresponds to the UUID of an |
| # image |
| # |
| # Related options: |
| # |
| # * scheduler/isolated_hosts |
| # * scheduler/restrict_isolated_hosts_to_isolated_images |
| # (list value) |
| # Deprecated group/name - [DEFAULT]/isolated_images |
| #isolated_images = |
| |
| # |
| # List of hosts that can only run certain images. |
| # |
| # If there is a need to restrict some images to only run on certain designated |
| # hosts, list those host names here. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. Also note that this setting |
| # only affects scheduling if the 'IsolatedHostsFilter' filter is enabled. |
| # |
| # Possible values: |
| # |
| # * A list of strings, where each string corresponds to the name of a host |
| # |
| # Related options: |
| # |
| # * scheduler/isolated_images |
| # * scheduler/restrict_isolated_hosts_to_isolated_images |
| # (list value) |
| # Deprecated group/name - [DEFAULT]/isolated_hosts |
| #isolated_hosts = |
| |
| # |
| # Prevent non-isolated images from being built on isolated hosts. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. Also note that this setting |
| # only affects scheduling if the 'IsolatedHostsFilter' filter is enabled. Even |
| # then, this option doesn't affect the behavior of requests for isolated images, |
| # which will *always* be restricted to isolated hosts. |
| # |
| # Related options: |
| # |
| # * scheduler/isolated_images |
| # * scheduler/isolated_hosts |
| # (boolean value) |
| # Deprecated group/name - [DEFAULT]/restrict_isolated_hosts_to_isolated_images |
| #restrict_isolated_hosts_to_isolated_images=true |
| |
| # |
| # Image property namespace for use in the host aggregate. |
| # |
| # Images and hosts can be configured so that certain images can only be |
| # scheduled |
| # to hosts in a particular aggregate. This is done with metadata values set on |
| # the host aggregate that are identified by beginning with the value of this |
| # option. If the host is part of an aggregate with such a metadata key, the |
| # image |
| # in the request spec must have the value of that metadata in its properties in |
| # order for the scheduler to consider the host as acceptable. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. Also note that this setting |
| # only affects scheduling if the 'aggregate_image_properties_isolation' filter |
| # is |
| # enabled. |
| # |
| # Possible values: |
| # |
| # * A string, where the string corresponds to an image property namespace |
| # |
| # Related options: |
| # |
| # * aggregate_image_properties_isolation_separator |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/aggregate_image_properties_isolation_namespace |
| #aggregate_image_properties_isolation_namespace=<None> |
| |
| # |
| # Separator character(s) for image property namespace and name. |
| # |
| # When using the aggregate_image_properties_isolation filter, the relevant |
| # metadata keys are prefixed with the namespace defined in the |
| # aggregate_image_properties_isolation_namespace configuration option plus a |
| # separator. This option defines the separator to be used. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. Also note that this setting |
| # only affects scheduling if the 'aggregate_image_properties_isolation' filter |
| # is enabled. |
| # |
| # Possible values: |
| # |
| # * A string, where the string corresponds to an image property namespace |
| # separator character |
| # |
| # Related options: |
| # |
| # * aggregate_image_properties_isolation_namespace |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/aggregate_image_properties_isolation_separator |
| #aggregate_image_properties_isolation_separator=. |
| |
| |
| [glance] |
| # Configuration options for the Image service |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # List of glance api servers endpoints available to nova. |
| # |
| # https is used for ssl-based glance api servers. |
| # |
| # Possible values: |
| # |
| # * A list of any fully qualified url of the form |
| # "scheme://hostname:port[/path]" |
| # (i.e. "http://10.0.1.0:9292" or "https://my.glance.server/image"). |
| # (list value) |
| #api_servers=<None> |
| api_servers = {{ controller.glance.get('protocol', 'http') }}://{{ controller.glance.host }}:{{ controller.glance.get('port', 9292) }} |
| |
| # |
| # Enable insecure SSL (https) requests to glance. |
| # |
| # This setting can be used to turn off verification of the glance server |
| # certificate against the certificate authorities. |
| # (boolean value) |
| #api_insecure=false |
| |
| # |
| # Enable glance operation retries. |
| # |
| # Specifies the number of retries when uploading / downloading |
| # an image to / from glance. 0 means no retries. |
| # (integer value) |
| # Minimum value: 0 |
| #num_retries=0 |
| |
| # |
| # List of url schemes that can be directly accessed. |
| # |
| # This option specifies a list of url schemes that can be downloaded |
| # directly via the direct_url. This direct_URL can be fetched from |
| # Image metadata which can be used by nova to get the |
| # image more efficiently. nova-compute could benefit from this by |
| # invoking a copy when it has access to the same file system as glance. |
| # |
| # Possible values: |
| # |
| # * [file], Empty list (default) |
| # (list value) |
| #allowed_direct_url_schemes = |
| |
| # |
| # Enable image signature verification. |
| # |
| # nova uses the image signature metadata from glance and verifies the signature |
| # of a signed image while downloading that image. If the image signature cannot |
| # be verified or if the image signature metadata is either incomplete or |
| # unavailable, then nova will not boot the image and instead will place the |
| # instance into an error state. This provides end users with stronger assurances |
| # of the integrity of the image data they are using to create servers. |
| # |
| # Related options: |
| # |
| # * The options in the `key_manager` group, as the key_manager is used |
| # for the signature validation. |
| # (boolean value) |
| {%- if controller.get('barbican', {}).get('enabled', False) %} |
| verify_glance_signatures=true |
| {%- else %} |
| #verify_glance_signatures=false |
| {%- endif %} |
| |
| # Enable or disable debug logging with glanceclient. (boolean value) |
| #debug=false |
| |
| |
| [guestfs] |
| # |
| # libguestfs is a set of tools for accessing and modifying virtual |
| # machine (VM) disk images. You can use this for viewing and editing |
| # files inside guests, scripting changes to VMs, monitoring disk |
| # used/free statistics, creating guests, P2V, V2V, performing backups, |
| # cloning VMs, building VMs, formatting disks and resizing disks. |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Enable/disables guestfs logging. |
| # |
| # This configures guestfs to emit debug messages and push them to the |
| # OpenStack logging system. When set to True, it traces libguestfs API calls |
| # and enables verbose debug messages. In order to use the above feature, |
| # "libguestfs" package must be installed. |
| # |
| # Related options: |
| # Since libguestfs access and modifies VM's managed by libvirt, below options |
| # should be set to give access to those VM's. |
| # * libvirt.inject_key |
| # * libvirt.inject_partition |
| # * libvirt.inject_password |
| # (boolean value) |
| #debug=false |
| |
| |
| [healthcheck] |
| |
| # |
| # From oslo.middleware |
| # |
| |
| # DEPRECATED: The path to respond to healthcheck requests on. (string value) |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| #path=/healthcheck |
| |
| # Show more detailed information as part of the response (boolean value) |
| #detailed=false |
| |
| # Additional backends that can perform health checks and report that information |
| # back as part of a request. (list value) |
| #backends = |
| |
| # Check the presence of a file to determine if an application is running on a |
| # port. Used by DisableByFileHealthcheck plugin. (string value) |
| #disable_by_file_path=<None> |
| |
| # Check the presence of a file based on a port to determine if an application is |
| # running on a port. Expects a "port:path" list of strings. Used by |
| # DisableByFilesPortsHealthcheck plugin. (list value) |
| #disable_by_file_paths = |
| |
| |
| [hyperv] |
| # |
| # The hyperv feature allows you to configure the Hyper-V hypervisor |
| # driver to be used within an OpenStack deployment. |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Dynamic memory ratio |
| # |
| # Enables dynamic memory allocation (ballooning) when set to a value |
| # greater than 1. The value expresses the ratio between the total RAM |
| # assigned to an instance and its startup RAM amount. For example a |
| # ratio of 2.0 for an instance with 1024MB of RAM implies 512MB of |
| # RAM allocated at startup. |
| # |
| # Possible values: |
| # |
| # * 1.0: Disables dynamic memory allocation (Default). |
| # * Float values greater than 1.0: Enables allocation of total implied |
| # RAM divided by this value for startup. |
| # (floating point value) |
| #dynamic_memory_ratio=1.0 |
| |
| # |
| # Enable instance metrics collection |
| # |
| # Enables metrics collection for an instance by using Hyper-V's |
| # metric APIs. Collected data can be retrieved by other apps and |
| # services, e.g.: Ceilometer. |
| # (boolean value) |
| #enable_instance_metrics_collection=false |
| |
| # |
| # Instances path share |
| # |
| # The name of a Windows share mapped to the "instances_path" dir |
| # and used by the resize feature to copy files to the target host. |
| # If left blank, an administrative share (hidden network share) will |
| # be used, looking for the same "instances_path" used locally. |
| # |
| # Possible values: |
| # |
| # * "": An administrative share will be used (Default). |
| # * Name of a Windows share. |
| # |
| # Related options: |
| # |
| # * "instances_path": The directory which will be used if this option |
| # here is left blank. |
| # (string value) |
| #instances_path_share = |
| |
| # |
| # Limit CPU features |
| # |
| # This flag is needed to support live migration to hosts with |
| # different CPU features and checked during instance creation |
| # in order to limit the CPU features used by the instance. |
| # (boolean value) |
| #limit_cpu_features=false |
| |
| # |
| # Mounted disk query retry count |
| # |
| # The number of times to retry checking for a mounted disk. |
| # The query runs until the device can be found or the retry |
| # count is reached. |
| # |
| # Possible values: |
| # |
| # * Positive integer values. Values greater than 1 are recommended |
| # (Default: 10). |
| # |
| # Related options: |
| # |
| # * Time interval between disk mount retries is declared with |
| # "mounted_disk_query_retry_interval" option. |
| # (integer value) |
| # Minimum value: 0 |
| #mounted_disk_query_retry_count=10 |
| |
| # |
| # Mounted disk query retry interval |
| # |
| # Interval between checks for a mounted disk, in seconds. |
| # |
| # Possible values: |
| # |
| # * Time in seconds (Default: 5). |
| # |
| # Related options: |
| # |
| # * This option is meaningful when the mounted_disk_query_retry_count |
| # is greater than 1. |
| # * The retry loop runs with mounted_disk_query_retry_count and |
| # mounted_disk_query_retry_interval configuration options. |
| # (integer value) |
| # Minimum value: 0 |
| #mounted_disk_query_retry_interval=5 |
| |
| # |
| # Power state check timeframe |
| # |
| # The timeframe to be checked for instance power state changes. |
| # This option is used to fetch the state of the instance from Hyper-V |
| # through the WMI interface, within the specified timeframe. |
| # |
| # Possible values: |
| # |
| # * Timeframe in seconds (Default: 60). |
| # (integer value) |
| # Minimum value: 0 |
| #power_state_check_timeframe=60 |
| |
| # |
| # Power state event polling interval |
| # |
| # Instance power state change event polling frequency. Sets the |
| # listener interval for power state events to the given value. |
| # This option enhances the internal lifecycle notifications of |
| # instances that reboot themselves. It is unlikely that an operator |
| # has to change this value. |
| # |
| # Possible values: |
| # |
| # * Time in seconds (Default: 2). |
| # (integer value) |
| # Minimum value: 0 |
| #power_state_event_polling_interval=2 |
| |
| # |
| # qemu-img command |
| # |
| # qemu-img is required for some of the image related operations |
| # like converting between different image types. You can get it |
| # from here: (http://qemu.weilnetz.de/) or you can install the |
| # Cloudbase OpenStack Hyper-V Compute Driver |
| # (https://cloudbase.it/openstack-hyperv-driver/) which automatically |
| # sets the proper path for this config option. You can either give the |
| # full path of qemu-img.exe or set its path in the PATH environment |
| # variable and leave this option to the default value. |
| # |
| # Possible values: |
| # |
| # * Name of the qemu-img executable, in case it is in the same |
| # directory as the nova-compute service or its path is in the |
| # PATH environment variable (Default). |
| # * Path of qemu-img command (DRIVELETTER:\PATH\TO\QEMU-IMG\COMMAND). |
| # |
| # Related options: |
| # |
| # * If the config_drive_cdrom option is False, qemu-img will be used to |
| # convert the ISO to a VHD, otherwise the configuration drive will |
| # remain an ISO. To use configuration drive with Hyper-V, you must |
| # set the mkisofs_cmd value to the full path to an mkisofs.exe |
| # installation. |
| # (string value) |
| #qemu_img_cmd=qemu-img.exe |
| |
| # |
| # External virtual switch name |
| # |
| # The Hyper-V Virtual Switch is a software-based layer-2 Ethernet |
| # network switch that is available with the installation of the |
| # Hyper-V server role. The switch includes programmatically managed |
| # and extensible capabilities to connect virtual machines to both |
| # virtual networks and the physical network. In addition, Hyper-V |
| # Virtual Switch provides policy enforcement for security, isolation, |
| # and service levels. The vSwitch represented by this config option |
| # must be an external one (not internal or private). |
| # |
| # Possible values: |
| # |
| # * If not provided, the first of a list of available vswitches |
| # is used. This list is queried using WQL. |
| # * Virtual switch name. |
| # (string value) |
| #vswitch_name=<None> |
| |
| # |
| # Wait soft reboot seconds |
| # |
| # Number of seconds to wait for instance to shut down after soft |
| # reboot request is made. We fall back to hard reboot if instance |
| # does not shutdown within this window. |
| # |
| # Possible values: |
| # |
| # * Time in seconds (Default: 60). |
| # (integer value) |
| # Minimum value: 0 |
| #wait_soft_reboot_seconds=60 |
| |
| # |
| # Configuration drive cdrom |
| # |
| # OpenStack can be configured to write instance metadata to |
| # a configuration drive, which is then attached to the |
| # instance before it boots. The configuration drive can be |
| # attached as a disk drive (default) or as a CD drive. |
| # |
| # Possible values: |
| # |
| # * True: Attach the configuration drive image as a CD drive. |
| # * False: Attach the configuration drive image as a disk drive (Default). |
| # |
| # Related options: |
| # |
| # * This option is meaningful with force_config_drive option set to 'True' |
| # or when the REST API call to create an instance will have |
| # '--config-drive=True' flag. |
| # * config_drive_format option must be set to 'iso9660' in order to use |
| # CD drive as the configuration drive image. |
| # * To use configuration drive with Hyper-V, you must set the |
| # mkisofs_cmd value to the full path to an mkisofs.exe installation. |
| # Additionally, you must set the qemu_img_cmd value to the full path |
| # to a qemu-img command installation. |
| # * You can configure the Compute service to always create a configuration |
| # drive by setting the force_config_drive option to 'True'. |
| # (boolean value) |
| #config_drive_cdrom=false |
| |
| # |
| # Configuration drive inject password |
| # |
| # Enables setting the admin password in the configuration drive image. |
| # |
| # Related options: |
| # |
| # * This option is meaningful when used with other options that enable |
| # configuration drive usage with Hyper-V, such as force_config_drive. |
| # * Currently, the only accepted config_drive_format is 'iso9660'. |
| # (boolean value) |
| #config_drive_inject_password=false |
| |
| # |
| # Volume attach retry count |
| # |
| # The number of times to retry attaching a volume. Volume attachment |
| # is retried until success or the given retry count is reached. |
| # |
| # Possible values: |
| # |
| # * Positive integer values (Default: 10). |
| # |
| # Related options: |
| # |
| # * Time interval between attachment attempts is declared with |
| # volume_attach_retry_interval option. |
| # (integer value) |
| # Minimum value: 0 |
| #volume_attach_retry_count=10 |
| |
| # |
| # Volume attach retry interval |
| # |
| # Interval between volume attachment attempts, in seconds. |
| # |
| # Possible values: |
| # |
| # * Time in seconds (Default: 5). |
| # |
| # Related options: |
| # |
| # * This option is meaningful when volume_attach_retry_count |
| # is greater than 1. |
| # * The retry loop runs with volume_attach_retry_count and |
| # volume_attach_retry_interval configuration options. |
| # (integer value) |
| # Minimum value: 0 |
| #volume_attach_retry_interval=5 |
| |
| # |
| # Enable RemoteFX feature |
| # |
| # This requires at least one DirectX 11 capable graphics adapter for |
| # Windows / Hyper-V Server 2012 R2 or newer and RDS-Virtualization |
| # feature has to be enabled. |
| # |
| # Instances with RemoteFX can be requested with the following flavor |
| # extra specs: |
| # |
| # **os:resolution**. Guest VM screen resolution size. Acceptable values:: |
| # |
| # 1024x768, 1280x1024, 1600x1200, 1920x1200, 2560x1600, 3840x2160 |
| # |
| # ``3840x2160`` is only available on Windows / Hyper-V Server 2016. |
| # |
| # **os:monitors**. Guest VM number of monitors. Acceptable values:: |
| # |
| # [1, 4] - Windows / Hyper-V Server 2012 R2 |
| # [1, 8] - Windows / Hyper-V Server 2016 |
| # |
| # **os:vram**. Guest VM VRAM amount. Only available on |
| # Windows / Hyper-V Server 2016. Acceptable values:: |
| # |
| # 64, 128, 256, 512, 1024 |
| # (boolean value) |
| #enable_remotefx=false |
| |
| # |
| # Use multipath connections when attaching iSCSI or FC disks. |
| # |
| # This requires the Multipath IO Windows feature to be enabled. MPIO must be |
| # configured to claim such devices. |
| # (boolean value) |
| #use_multipath_io=false |
| |
| # |
| # List of iSCSI initiators that will be used for establishing iSCSI sessions. |
| # |
| # If none are specified, the Microsoft iSCSI initiator service will choose the |
| # initiator. |
| # (list value) |
| #iscsi_initiator_list = |
| |
| |
| [image_file_url] |
| |
| # |
| # From nova.conf |
| # |
| |
| # DEPRECATED: |
| # List of file systems that are configured in this file in the |
| # image_file_url:<list entry name> sections |
| # (list value) |
| # This option is deprecated for removal since 14.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # The feature to download images from glance via filesystem is not used and will |
| # be removed in the future. |
| #filesystems = |
| |
| |
| {% if controller.ironic is defined -%} |
| [ironic] |
| # |
| # Configuration options for Ironic driver (Bare Metal). |
| # If using the Ironic driver, the following options must be set: |
| # * auth_type |
| # * auth_url |
| # * project_name |
| # * username |
| # * password |
| # * project_domain_id or project_domain_name |
| # * user_domain_id or user_domain_name |
| |
| # |
| # From nova.conf |
| # |
| |
| # URL override for the Ironic API endpoint. (string value) |
| api_endpoint={{ controller.ironic.get('protocol', 'http') }}://{{ controller.ironic.host }}:{{ controller.ironic.port }} |
| |
| # |
| # The number of times to retry when a request conflicts. |
| # If set to 0, only try once, no retries. |
| # |
| # Related options: |
| # |
| # * api_retry_interval |
| # (integer value) |
| # Minimum value: 0 |
| #api_max_retries=60 |
| |
| # |
| # The number of seconds to wait before retrying the request. |
| # |
| # Related options: |
| # |
| # * api_max_retries |
| # (integer value) |
| # Minimum value: 0 |
| #api_retry_interval=2 |
| |
| # Timeout (seconds) to wait for node serial console state changed. Set to 0 to |
| # disable timeout. (integer value) |
| # Minimum value: 0 |
| #serial_console_state_timeout=10 |
| |
| # PEM encoded Certificate Authority to use when verifying HTTPs connections. |
| # (string value) |
| {%- if controller.ironic.get('protocol', 'http') == 'https' %} |
| cafile={{ controller.identity.get('cacert_file', controller.cacert_file) }} |
| {%- endif %} |
| |
| # PEM encoded client certificate key file (string value) |
| #keyfile=<None> |
| |
| # Verify HTTPS connections. (boolean value) |
| #insecure=false |
| |
| # Timeout value for http requests (integer value) |
| #timeout=<None> |
| |
| # Authentication type to load (string value) |
| # Deprecated group/name - [ironic]/auth_plugin |
| auth_type={{ controller.ironic.auth_type }} |
| |
| # Config Section from which to load plugin specific options (string value) |
| #auth_section=<None> |
| |
| # Authentication URL (string value) |
| auth_url={{ controller.identity.get('protocol', 'http') }}://{{ controller.identity.host }}:{{ controller.identity.port }}/v3 |
| |
| # Domain ID to scope to (string value) |
| #domain_id=<None> |
| |
| # Domain name to scope to (string value) |
| #domain_name=<None> |
| |
| # Project ID to scope to (string value) |
| #project_id=<None> |
| |
| # Project name to scope to (string value) |
| project_name={{ controller.identity.tenant }} |
| |
| # Domain ID containing project (string value) |
| #project_domain_id=<None> |
| |
| # Domain name containing project (string value) |
| project_domain_name={{ controller.ironic.project_domain_name }} |
| |
| # Trust ID (string value) |
| #trust_id=<None> |
| |
| # User ID (string value) |
| #user_id=<None> |
| |
| # Username (string value) |
| # Deprecated group/name - [ironic]/user-name |
| username={{ controller.ironic.user }} |
| |
| # User's domain id (string value) |
| #user_domain_id=<None> |
| |
| # User's domain name (string value) |
| user_domain_name={{ controller.ironic.user_domain_name }} |
| |
| # User's password (string value) |
| password={{ controller.ironic.password }} |
| {%- endif %} |
| |
| |
| [key_manager] |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Fixed key returned by key manager, specified in hex. |
| # |
| # Possible values: |
| # |
| # * Empty string or a key in hex value |
| # (string value) |
| # Deprecated group/name - [keymgr]/fixed_key |
| #fixed_key=<None> |
| |
| # The full class name of the key manager API class (string value) |
| {%- if controller.get('barbican', {}).get('enabled', False) %} |
| api_class=castellan.key_manager.barbican_key_manager.BarbicanKeyManager |
| {%- endif %} |
| |
| # The type of authentication credential to create. Possible values are 'token', |
| # 'password', 'keystone_token', and 'keystone_password'. Required if no context |
| # is passed to the credential factory. (string value) |
| #auth_type=<None> |
| |
| # Token for authentication. Required for 'token' and 'keystone_token' auth_type |
| # if no context is passed to the credential factory. (string value) |
| #token=<None> |
| |
| # Username for authentication. Required for 'password' auth_type. Optional for |
| # the 'keystone_password' auth_type. (string value) |
| #username=<None> |
| |
| # Password for authentication. Required for 'password' and 'keystone_password' |
| # auth_type. (string value) |
| #password=<None> |
| |
| # User ID for authentication. Optional for 'keystone_token' and |
| # 'keystone_password' auth_type. (string value) |
| #user_id=<None> |
| |
| # User's domain ID for authentication. Optional for 'keystone_token' and |
| # 'keystone_password' auth_type. (string value) |
| #user_domain_id=<None> |
| |
| # User's domain name for authentication. Optional for 'keystone_token' and |
| # 'keystone_password' auth_type. (string value) |
| #user_domain_name=<None> |
| |
| # Trust ID for trust scoping. Optional for 'keystone_token' and |
| # 'keystone_password' auth_type. (string value) |
| #trust_id=<None> |
| |
| # Domain ID for domain scoping. Optional for 'keystone_token' and |
| # 'keystone_password' auth_type. (string value) |
| #domain_id=<None> |
| |
| # Domain name for domain scoping. Optional for 'keystone_token' and |
| # 'keystone_password' auth_type. (string value) |
| #domain_name=<None> |
| |
| # Project ID for project scoping. Optional for 'keystone_token' and |
| # 'keystone_password' auth_type. (string value) |
| #project_id=<None> |
| |
| # Project name for project scoping. Optional for 'keystone_token' and |
| # 'keystone_password' auth_type. (string value) |
| #project_name=<None> |
| |
| # Project's domain ID for project. Optional for 'keystone_token' and |
| # 'keystone_password' auth_type. (string value) |
| #project_domain_id=<None> |
| |
| # Project's domain name for project. Optional for 'keystone_token' and |
| # 'keystone_password' auth_type. (string value) |
| #project_domain_name=<None> |
| |
| # Allow fetching a new token if the current one is going to expire. Optional for |
| # 'keystone_token' and 'keystone_password' auth_type. (boolean value) |
| #reauthenticate=true |
| |
| |
| [keystone_authtoken] |
| |
| # |
| # From keystonemiddleware.auth_token |
| # |
| revocation_cache_time = 10 |
| signing_dir=/tmp/keystone-signing-nova |
| auth_type = password |
| user_domain_id = {{ controller.identity.get('domain', 'default') }} |
| project_domain_id = {{ controller.identity.get('domain', 'default') }} |
| project_name = {{ controller.identity.tenant }} |
| username = {{ controller.identity.user }} |
| password = {{ controller.identity.password }} |
| auth_uri={{ controller.identity.get('protocol', 'http') }}://{{ controller.identity.host }}:5000 |
| auth_url={{ controller.identity.get('protocol', 'http') }}://{{ controller.identity.host }}:35357 |
| {%- if controller.identity.get('protocol', 'http') == 'https' %} |
| cafile={{ controller.identity.get('cacert_file', controller.cacert_file) }} |
| {%- endif %} |
| {%- if controller.cache is defined %} |
| memcached_servers={%- for member in controller.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %} |
| {%- if controller.cache.get('security', {}).get('enabled', False) %} |
| memcache_security_strategy = {{ controller.cache.security.get('strategy', 'ENCRYPT') }} |
| {%- if controller.cache.security.secret_key is not defined or not controller.cache.security.secret_key %} |
| {%- do salt.test.exception('controller.cache.security.secret_key is not defined: Please add secret_key') %} |
| {%- else %} |
| memcache_secret_key = {{ controller.cache.security.secret_key }} |
| {%- endif %} |
| {%- endif %} |
| {%- endif %} |
| # Complete "public" Identity API endpoint. This endpoint should not be an |
| # "admin" endpoint, as it should be accessible by all end users. Unauthenticated |
| # clients are redirected to this endpoint to authenticate. Although this |
| # endpoint should ideally be unversioned, client support in the wild varies. |
| # If you're using a versioned v2 endpoint here, then this should *not* be the |
| # same endpoint the service user utilizes for validating tokens, because normal |
| # end users may not be able to reach that endpoint. (string value) |
| #auth_uri=<None> |
| |
| # API version of the admin Identity API endpoint. (string value) |
| #auth_version=<None> |
| |
| # Do not handle authorization requests within the middleware, but delegate the |
| # authorization decision to downstream WSGI components. (boolean value) |
| #delay_auth_decision=false |
| |
| # Request timeout value for communicating with Identity API server. (integer |
| # value) |
| #http_connect_timeout=<None> |
| |
| # How many times are we trying to reconnect when communicating with Identity API |
| # Server. (integer value) |
| #http_request_max_retries=3 |
| |
| # Request environment key where the Swift cache object is stored. When |
| # auth_token middleware is deployed with a Swift cache, use this option to have |
| # the middleware share a caching backend with swift. Otherwise, use the |
| # ``memcached_servers`` option instead. (string value) |
| #cache=<None> |
| |
| # Required if identity server requires client certificate (string value) |
| #certfile=<None> |
| |
| # Required if identity server requires client certificate (string value) |
| #keyfile=<None> |
| |
| # A PEM encoded Certificate Authority to use when verifying HTTPs connections. |
| # Defaults to system CAs. (string value) |
| #cafile=<None> |
| |
| # Verify HTTPS connections. (boolean value) |
| #insecure=false |
| |
| # The region in which the identity server can be found. (string value) |
| #region_name=<None> |
| |
| # DEPRECATED: Directory used to cache files related to PKI tokens. This option |
| # has been deprecated in the Ocata release and will be removed in the P release. |
| # (string value) |
| # This option is deprecated for removal since Ocata. |
| # Its value may be silently ignored in the future. |
| # Reason: PKI token format is no longer supported. |
| #signing_dir=<None> |
| |
| # Optionally specify a list of memcached server(s) to use for caching. If left |
| # undefined, tokens will instead be cached in-process. (list value) |
| # Deprecated group/name - [keystone_authtoken]/memcache_servers |
| #memcached_servers=<None> |
| |
| # In order to prevent excessive effort spent validating tokens, the middleware |
| # caches previously-seen tokens for a configurable duration (in seconds). Set to |
| # -1 to disable caching completely. (integer value) |
| #token_cache_time=300 |
| |
| # DEPRECATED: Determines the frequency at which the list of revoked tokens is |
| # retrieved from the Identity service (in seconds). A high number of revocation |
| # events combined with a low cache duration may significantly reduce |
| # performance. Only valid for PKI tokens. This option has been deprecated in the |
| # Ocata release and will be removed in the P release. (integer value) |
| # This option is deprecated for removal since Ocata. |
| # Its value may be silently ignored in the future. |
| # Reason: PKI token format is no longer supported. |
| #revocation_cache_time=10 |
| |
| # (Optional) If defined, indicate whether token data should be authenticated or |
| # authenticated and encrypted. If MAC, token data is authenticated (with HMAC) |
| # in the cache. If ENCRYPT, token data is encrypted and authenticated in the |
| # cache. If the value is not one of these options or empty, auth_token will |
| # raise an exception on initialization. (string value) |
| # Allowed values: None, MAC, ENCRYPT |
| #memcache_security_strategy=None |
| |
| # (Optional, mandatory if memcache_security_strategy is defined) This string is |
| # used for key derivation. (string value) |
| #memcache_secret_key=<None> |
| |
| # (Optional) Number of seconds memcached server is considered dead before it is |
| # tried again. (integer value) |
| #memcache_pool_dead_retry=300 |
| |
| # (Optional) Maximum total number of open connections to every memcached server. |
| # (integer value) |
| #memcache_pool_maxsize=10 |
| |
| # (Optional) Socket timeout in seconds for communicating with a memcached |
| # server. (integer value) |
| #memcache_pool_socket_timeout=3 |
| |
| # (Optional) Number of seconds a connection to memcached is held unused in the |
| # pool before it is closed. (integer value) |
| #memcache_pool_unused_timeout=60 |
| |
| # (Optional) Number of seconds that an operation will wait to get a memcached |
| # client connection from the pool. (integer value) |
| #memcache_pool_conn_get_timeout=10 |
| |
| # (Optional) Use the advanced (eventlet safe) memcached client pool. The |
| # advanced pool will only work under python 2.x. (boolean value) |
| #memcache_use_advanced_pool=false |
| |
| # (Optional) Indicate whether to set the X-Service-Catalog header. If False, |
| # middleware will not ask for service catalog on token validation and will not |
| # set the X-Service-Catalog header. (boolean value) |
| #include_service_catalog=true |
| |
| # Used to control the use and type of token binding. Can be set to: "disabled" |
| # to not check token binding. "permissive" (default) to validate binding |
| # information if the bind type is of a form known to the server and ignore it if |
| # not. "strict" like "permissive" but if the bind type is unknown the token will |
| # be rejected. "required" any form of token binding is needed to be allowed. |
| # Finally the name of a binding method that must be present in tokens. (string |
| # value) |
| #enforce_token_bind=permissive |
| |
| # DEPRECATED: If true, the revocation list will be checked for cached tokens. |
| # This requires that PKI tokens are configured on the identity server. (boolean |
| # value) |
| # This option is deprecated for removal since Ocata. |
| # Its value may be silently ignored in the future. |
| # Reason: PKI token format is no longer supported. |
| #check_revocations_for_cached=false |
| |
| # DEPRECATED: Hash algorithms to use for hashing PKI tokens. This may be a |
| # single algorithm or multiple. The algorithms are those supported by Python |
| # standard hashlib.new(). The hashes will be tried in the order given, so put |
| # the preferred one first for performance. The result of the first hash will be |
| # stored in the cache. This will typically be set to multiple values only while |
| # migrating from a less secure algorithm to a more secure one. Once all the old |
| # tokens are expired this option should be set to a single value for better |
| # performance. (list value) |
| # This option is deprecated for removal since Ocata. |
| # Its value may be silently ignored in the future. |
| # Reason: PKI token format is no longer supported. |
| #hash_algorithms=md5 |
| |
| # A choice of roles that must be present in a service token. Service tokens are |
| # allowed to request that an expired token can be used and so this check should |
| # tightly control that only actual services should be sending this token. Roles |
| # here are applied as an ANY check so any role in this list must be present. For |
| # backwards compatibility reasons this currently only affects the allow_expired |
| # check. (list value) |
| #service_token_roles=service |
| |
| # For backwards compatibility reasons we must let valid service tokens pass that |
| # don't pass the service_token_roles check as valid. Setting this true will |
| # become the default in a future release and should be enabled if possible. |
| # (boolean value) |
| #service_token_roles_required=false |
| |
| # Prefix to prepend at the beginning of the path. Deprecated, use identity_uri. |
| # (string value) |
| #auth_admin_prefix = |
| |
| # Host providing the admin Identity API endpoint. Deprecated, use identity_uri. |
| # (string value) |
| #auth_host=127.0.0.1 |
| |
| # Port of the admin Identity API endpoint. Deprecated, use identity_uri. |
| # (integer value) |
| #auth_port=35357 |
| |
| # Protocol of the admin Identity API endpoint. Deprecated, use identity_uri. |
| # (string value) |
| # Allowed values: http, https |
| #auth_protocol=https |
| |
| # Complete admin Identity API endpoint. This should specify the unversioned root |
| # endpoint e.g. https://localhost:35357/ (string value) |
| #identity_uri=<None> |
| |
| # This option is deprecated and may be removed in a future release. Single |
| # shared secret with the Keystone configuration used for bootstrapping a |
| # Keystone installation, or otherwise bypassing the normal authentication |
| # process. This option should not be used, use `admin_user` and `admin_password` |
| # instead. (string value) |
| #admin_token=<None> |
| |
| # Service username. (string value) |
| #admin_user=<None> |
| |
| # Service user password. (string value) |
| #admin_password=<None> |
| |
| # Service tenant name. (string value) |
| #admin_tenant_name=admin |
| |
| # Authentication type to load (string value) |
| # Deprecated group/name - [keystone_authtoken]/auth_plugin |
| #auth_type=<None> |
| |
| # Config Section from which to load plugin specific options (string value) |
| #auth_section=<None> |
| |
| |
| [libvirt] |
| # |
| # Libvirt options allows cloud administrator to configure related |
| # libvirt hypervisor driver to be used within an OpenStack deployment. |
| # |
| # Almost all of the libvirt config options are influenced by ``virt_type`` config |
| # which describes the virtualization type (or so called domain type) libvirt |
| # should use for specific features such as live migration, snapshot. |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # The ID of the image to boot from to rescue data from a corrupted instance. |
| # |
| # If the rescue REST API operation doesn't provide an ID of an image to |
| # use, the image which is referenced by this ID is used. If this |
| # option is not set, the image from the instance is used. |
| # |
| # Possible values: |
| # |
| # * An ID of an image or nothing. If it points to an *Amazon Machine |
| # Image* (AMI), consider to set the config options ``rescue_kernel_id`` |
| # and ``rescue_ramdisk_id`` too. If nothing is set, the image of the instance |
| # is used. |
| # |
| # Related options: |
| # |
| # * ``rescue_kernel_id``: If the chosen rescue image allows the separate |
| # definition of its kernel disk, the value of this option is used, |
| # if specified. This is the case when *Amazon*'s AMI/AKI/ARI image |
| # format is used for the rescue image. |
| # * ``rescue_ramdisk_id``: If the chosen rescue image allows the separate |
| # definition of its RAM disk, the value of this option is used, if |
| # specified. This is the case when *Amazon*'s AMI/AKI/ARI image |
| # format is used for the rescue image. |
| # (string value) |
| #rescue_image_id=<None> |
| |
| # |
| # The ID of the kernel (AKI) image to use with the rescue image. |
| # |
| # If the chosen rescue image allows the separate definition of its kernel |
| # disk, the value of this option is used, if specified. This is the case |
| # when *Amazon*'s AMI/AKI/ARI image format is used for the rescue image. |
| # |
| # Possible values: |
| # |
| # * An ID of a kernel image or nothing. If nothing is specified, the kernel |
| # disk from the instance is used if it was launched with one. |
| # |
| # Related options: |
| # |
| # * ``rescue_image_id``: If that option points to an image in *Amazon*'s |
| # AMI/AKI/ARI image format, it's useful to use ``rescue_kernel_id`` too. |
| # (string value) |
| #rescue_kernel_id=<None> |
| |
| # |
| # The ID of the RAM disk (ARI) image to use with the rescue image. |
| # |
| # If the chosen rescue image allows the separate definition of its RAM |
| # disk, the value of this option is used, if specified. This is the case |
| # when *Amazon*'s AMI/AKI/ARI image format is used for the rescue image. |
| # |
| # Possible values: |
| # |
| # * An ID of a RAM disk image or nothing. If nothing is specified, the RAM |
| # disk from the instance is used if it was launched with one. |
| # |
| # Related options: |
| # |
| # * ``rescue_image_id``: If that option points to an image in *Amazon*'s |
| # AMI/AKI/ARI image format, it's useful to use ``rescue_ramdisk_id`` too. |
| # (string value) |
| #rescue_ramdisk_id=<None> |
| |
| # |
| # Describes the virtualization type (or so called domain type) libvirt should |
| # use. |
| # |
| # The choice of this type must match the underlying virtualization strategy |
| # you have chosen for this host. |
| # |
| # Possible values: |
| # |
| # * See the predefined set of case-sensitive values. |
| # |
| # Related options: |
| # |
| # * ``connection_uri``: depends on this |
| # * ``disk_prefix``: depends on this |
| # * ``cpu_mode``: depends on this |
| # * ``cpu_model``: depends on this |
| # (string value) |
| # Allowed values: kvm, lxc, qemu, uml, xen, parallels |
| #virt_type=kvm |
| virt_type=kvm |
| |
| # |
| # Overrides the default libvirt URI of the chosen virtualization type. |
| # |
| # If set, Nova will use this URI to connect to libvirt. |
| # |
| # Possible values: |
| # |
| # * An URI like ``qemu:///system`` or ``xen+ssh://oirase/`` for example. |
| # This is only necessary if the URI differs to the commonly known URIs |
| # for the chosen virtualization type. |
| # |
| # Related options: |
| # |
| # * ``virt_type``: Influences what is used as default value here. |
| # (string value) |
| #connection_uri = |
| |
| # |
| # Allow the injection of an admin password for instance only at ``create`` and |
| # ``rebuild`` process. |
| # |
| # There is no agent needed within the image to do this. If *libguestfs* is |
| # available on the host, it will be used. Otherwise *nbd* is used. The file |
| # system of the image will be mounted and the admin password, which is provided |
| # in the REST API call will be injected as password for the root user. If no |
| # root user is available, the instance won't be launched and an error is thrown. |
| # Be aware that the injection is *not* possible when the instance gets launched |
| # from a volume. |
| # |
| # Possible values: |
| # |
| # * True: Allows the injection. |
| # * False (default): Disallows the injection. Any via the REST API provided |
| # admin password will be silently ignored. |
| # |
| # Related options: |
| # |
| # * ``inject_partition``: That option will decide about the discovery and usage |
| # of the file system. It also can disable the injection at all. |
| # (boolean value) |
| #inject_password=false |
| |
| # |
| # Allow the injection of an SSH key at boot time. |
| # |
| # There is no agent needed within the image to do this. If *libguestfs* is |
| # available on the host, it will be used. Otherwise *nbd* is used. The file |
| # system of the image will be mounted and the SSH key, which is provided |
| # in the REST API call will be injected as SSH key for the root user and |
| # appended to the ``authorized_keys`` of that user. The SELinux context will |
| # be set if necessary. Be aware that the injection is *not* possible when the |
| # instance gets launched from a volume. |
| # |
| # This config option will enable directly modifying the instance disk and does |
| # not affect what cloud-init may do using data from config_drive option or the |
| # metadata service. |
| # |
| # Related options: |
| # |
| # * ``inject_partition``: That option will decide about the discovery and usage |
| # of the file system. It also can disable the injection at all. |
| # (boolean value) |
| #inject_key=false |
| |
| # |
| # Determines how the file system is chosen to inject data into it.
| # |
| # *libguestfs* will be used as the first solution to inject data. If that's not
| # available on the host, the image will be locally mounted on the host as a
| # fallback solution. If libguestfs is not able to determine the root partition
| # (because there are more or less than one root partition) or cannot mount the
| # file system it will result in an error and the instance won't boot.
| # |
| # Possible values: |
| # |
| # * -2 => disable the injection of data. |
| # * -1 => find the root partition with the file system to mount with libguestfs |
| # * 0 => The image is not partitioned |
| # * >0 => The number of the partition to use for the injection |
| # |
| # Related options: |
| # |
| # * ``inject_key``: If this option allows the injection of a SSH key it depends |
| # on value greater or equal to -1 for ``inject_partition``. |
| # * ``inject_password``: If this option allows the injection of an admin |
| # password |
| # it depends on value greater or equal to -1 for ``inject_partition``. |
| # * ``guestfs`` You can enable the debug log level of libguestfs with this |
| # config option. A more verbose output will help in debugging issues. |
| # * ``virt_type``: If you use ``lxc`` as virt_type it will be treated as a |
| # single partition image |
| # (integer value) |
| # Minimum value: -2 |
| #inject_partition=-2 |
| inject_partition = -1 |
| |
| # DEPRECATED: |
| # Enable a mouse cursor within a graphical VNC or SPICE sessions. |
| # |
| # This will only be taken into account if the VM is fully virtualized and VNC |
| # and/or SPICE is enabled. If the node doesn't support a graphical framebuffer, |
| # then it is valid to set this to False. |
| # |
| # Related options: |
| # * ``[vnc]enabled``: If VNC is enabled, ``use_usb_tablet`` will have an effect. |
| # * ``[spice]enabled`` + ``[spice].agent_enabled``: If SPICE is enabled and the |
| # spice agent is disabled, the config value of ``use_usb_tablet`` will have |
| # an effect. |
| # (boolean value) |
| # This option is deprecated for removal since 14.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: This option is being replaced by the 'pointer_model' option. |
| #use_usb_tablet=true |
| use_usb_tablet=true |
| |
| # |
| # The IP address or hostname to be used as the target for live migration |
| # traffic. |
| # |
| # If this option is set to None, the hostname of the migration target compute |
| # node will be used. |
| # |
| # This option is useful in environments where the live-migration traffic can |
| # impact the network plane significantly. A separate network for live-migration |
| # traffic can then use this config option and avoids the impact on the |
| # management network. |
| # |
| # Possible values: |
| # |
| # * A valid IP address or hostname, else None. |
| # (string value) |
| #live_migration_inbound_addr=<None> |
| |
| # DEPRECATED: |
| # Live migration target URI to use. |
| # |
| # Override the default libvirt live migration target URI (which is dependent |
| # on virt_type). Any included "%s" is replaced with the migration target |
| # hostname. |
| # |
| # If this option is set to None (which is the default), Nova will automatically |
| # generate the `live_migration_uri` value based on only 3 supported `virt_type` |
| # in the following list:
| # * 'kvm': 'qemu+tcp://%s/system' |
| # * 'qemu': 'qemu+tcp://%s/system' |
| # * 'xen': 'xenmigr://%s/system' |
| # |
| # Related options: |
| # * ``live_migration_inbound_addr``: If ``live_migration_inbound_addr`` value |
| # is not None, the ip/hostname address of target compute node is used instead |
| # of ``live_migration_uri`` as the uri for live migration. |
| # * ``live_migration_scheme``: If ``live_migration_uri`` is not set, the scheme |
| # used for live migration is taken from ``live_migration_scheme`` instead. |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # live_migration_uri is deprecated for removal in favor of two other options |
| # that |
| # allow to change live migration scheme and target URI: |
| # ``live_migration_scheme`` |
| # and ``live_migration_inbound_addr`` respectively. |
| #live_migration_uri=<None> |
| |
| # |
| # Schema used for live migration. |
| # |
| # Override the default libvirt live migration scheme (which is dependent on
| # virt_type). If this option is set to None, nova will automatically choose a |
| # sensible default based on the hypervisor. It is not recommended that you |
| # change |
| # this unless you are very sure that hypervisor supports a particular scheme. |
| # |
| # Related options: |
| # * ``virt_type``: This option is meaningful only when ``virt_type`` is set to |
| # `kvm` or `qemu`. |
| # * ``live_migration_uri``: If ``live_migration_uri`` value is not None, the |
| # scheme used for live migration is taken from ``live_migration_uri`` instead. |
| # (string value) |
| #live_migration_scheme=<None> |
| |
| # |
| # Enable tunnelled migration. |
| # |
| # This option enables the tunnelled migration feature, where migration data is |
| # transported over the libvirtd connection. If enabled, we use the |
| # VIR_MIGRATE_TUNNELLED migration flag, avoiding the need to configure |
| # the network to allow direct hypervisor to hypervisor communication. |
| # If False, use the native transport. If not set, Nova will choose a |
| # sensible default based on, for example the availability of native |
| # encryption support in the hypervisor. Enabling this option will definitely
| # impact performance massively. |
| # |
| # Note that this option is NOT compatible with use of block migration. |
| # |
| # Possible values: |
| # |
| # * Supersedes and (if set) overrides the deprecated 'live_migration_flag' and |
| # 'block_migration_flag' to enable tunneled migration. |
| # (boolean value) |
| #live_migration_tunnelled=false |
| |
| # |
| # Maximum bandwidth(in MiB/s) to be used during migration. |
| # |
| # If set to 0, the hypervisor will choose a suitable default. Some hypervisors |
| # do not support this feature and will return an error if bandwidth is not 0. |
| # Please refer to the libvirt documentation for further details. |
| # (integer value) |
| #live_migration_bandwidth=0 |
| |
| # |
| # Maximum permitted downtime, in milliseconds, for live migration |
| # switchover. |
| # |
| # Will be rounded up to a minimum of 100ms. You can increase this value |
| # if you want to allow live-migrations to complete faster, or avoid |
| # live-migration timeout errors by allowing the guest to be paused for |
| # longer during the live-migration switch over. |
| # |
| # Related options: |
| # |
| # * live_migration_completion_timeout |
| # (integer value) |
| #live_migration_downtime=500 |
| |
| # |
| # Number of incremental steps to reach max downtime value. |
| # |
| # Will be rounded up to a minimum of 3 steps. |
| # (integer value) |
| #live_migration_downtime_steps=10 |
| |
| # |
| # Time to wait, in seconds, between each step increase of the migration |
| # downtime. |
| # |
| # Minimum delay is 10 seconds. Value is per GiB of guest RAM + disk to be |
| # transferred, with lower bound of a minimum of 2 GiB per device. |
| # (integer value) |
| #live_migration_downtime_delay=75 |
| |
| # |
| # Time to wait, in seconds, for migration to successfully complete transferring |
| # data before aborting the operation. |
| # |
| # Value is per GiB of guest RAM + disk to be transferred, with lower bound of |
| # a minimum of 2 GiB. Should usually be larger than downtime delay * downtime |
| # steps. Set to 0 to disable timeouts. |
| # |
| # Related options: |
| # |
| # * live_migration_downtime |
| # * live_migration_downtime_steps |
| # * live_migration_downtime_delay |
| # (integer value) |
| # Note: This option can be changed without restarting. |
| #live_migration_completion_timeout=800 |
| |
| # DEPRECATED: |
| # Time to wait, in seconds, for migration to make forward progress in |
| # transferring data before aborting the operation. |
| # |
| # Set to 0 to disable timeouts. |
| # |
| # This is deprecated, and now disabled by default because we have found serious |
| # bugs in this feature that caused false live-migration timeout failures. This |
| # feature will be removed or replaced in a future release. |
| # (integer value) |
| # Note: This option can be changed without restarting. |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| # Reason: Serious bugs found in this feature. |
| #live_migration_progress_timeout=0 |
| |
| # |
| # This option allows nova to switch an on-going live migration to post-copy |
| # mode, i.e., switch the active VM to the one on the destination node before the |
| # migration is complete, therefore ensuring an upper bound on the memory that |
| # needs to be transferred. Post-copy requires libvirt>=1.3.3 and QEMU>=2.5.0. |
| # |
| # When permitted, post-copy mode will be automatically activated if a |
| # live-migration memory copy iteration does not make a percentage increase of at
| # least 10% over the last iteration. |
| # |
| # The live-migration force complete API also uses post-copy when permitted. If |
| # post-copy mode is not available, force complete falls back to pausing the VM |
| # to ensure the live-migration operation will complete. |
| # |
| # When using post-copy mode, if the source and destination hosts lose network
| # connectivity, the VM being live-migrated will need to be rebooted. For more |
| # details, please see the Administration guide. |
| # |
| # Related options: |
| # |
| # * live_migration_permit_auto_converge |
| # (boolean value) |
| #live_migration_permit_post_copy=false |
| |
| # |
| # This option allows nova to start live migration with auto converge on. |
| # |
| # Auto converge throttles down CPU if a progress of on-going live migration |
| # is slow. Auto converge will only be used if this flag is set to True and |
| # post copy is not permitted or post copy is unavailable due to the version |
| # of libvirt and QEMU in use. Auto converge requires libvirt>=1.2.3 and |
| # QEMU>=1.6.0. |
| # |
| # Related options: |
| # |
| # * live_migration_permit_post_copy |
| # (boolean value) |
| #live_migration_permit_auto_converge=false |
| {%- if controller.get('libvirt', {}).live_migration_permit_auto_converge is defined %} |
| live_migration_permit_auto_converge={{ controller.libvirt.live_migration_permit_auto_converge|lower }} |
| {%- endif %} |
| |
| # |
| # Determine the snapshot image format when sending to the image service. |
| # |
| # If set, this decides what format is used when sending the snapshot to the |
| # image service. |
| # If not set, defaults to same type as source image. |
| # |
| # Possible values: |
| # |
| # * ``raw``: RAW disk format |
| # * ``qcow2``: KVM default disk format |
| # * ``vmdk``: VMWare default disk format |
| # * ``vdi``: VirtualBox default disk format |
| # * If not set, defaults to same type as source image. |
| # (string value) |
| # Allowed values: raw, qcow2, vmdk, vdi |
| #snapshot_image_format=<None> |
| |
| # |
| # Override the default disk prefix for the devices attached to an instance. |
| # |
| # If set, this is used to identify a free disk device name for a bus. |
| # |
| # Possible values: |
| # |
| # * Any prefix which will result in a valid disk device name like 'sda' or 'hda' |
| # for example. This is only necessary if the device names differ to the |
| # commonly known device name prefixes for a virtualization type such as: sd, |
| # xvd, uvd, vd. |
| # |
| # Related options: |
| # |
| # * ``virt_type``: Influences which device type is used, which determines |
| # the default disk prefix. |
| # (string value) |
| #disk_prefix=<None> |
| |
| # Number of seconds to wait for instance to shut down after soft reboot request |
| # is made. We fall back to hard reboot if instance does not shutdown within this |
| # window. (integer value) |
| #wait_soft_reboot_seconds=120 |
| |
| # |
| # Is used to set the CPU mode an instance should have. |
| # |
| # If virt_type="kvm|qemu", it will default to "host-model", otherwise it will |
| # default to "none". |
| # |
| # Possible values: |
| # |
| # * ``host-model``: Clones the host CPU feature flags. |
| # * ``host-passthrough``: Use the host CPU model exactly; |
| # * ``custom``: Use a named CPU model; |
| # * ``none``: Not set any CPU model. |
| # |
| # Related options: |
| # |
| # * ``cpu_model``: If ``custom`` is used for ``cpu_mode``, set this config |
| # option too, otherwise this would result in an error and the instance won't |
| # be launched. |
| # (string value) |
| # Allowed values: host-model, host-passthrough, custom, none |
| #cpu_mode=<None> |
| cpu_mode=host-passthrough |
| |
| # |
| # Set the name of the libvirt CPU model the instance should use. |
| # |
| # Possible values: |
| # |
| # * The names listed in /usr/share/libvirt/cpu_map.xml |
| # |
| # Related options: |
| # |
| # * ``cpu_mode``: Don't set this when ``cpu_mode`` is NOT set to ``custom``. |
| # This would result in an error and the instance won't be launched. |
| # * ``virt_type``: Only the virtualization types ``kvm`` and ``qemu`` use this. |
| # (string value) |
| #cpu_model=<None> |
| |
| # Location where libvirt driver will store snapshots before uploading them to |
| # image service (string value) |
| #snapshots_directory=$instances_path/snapshots |
| |
| # Location where the Xen hvmloader is kept (string value) |
| #xen_hvmloader_path=/usr/lib/xen/boot/hvmloader |
| |
| # Specific cachemodes to use for different disk types e.g: |
| # file=directsync,block=none (list value) |
| #disk_cachemodes = |
| |
| # A path to a device that will be used as source of entropy on the host. |
| # Permitted options are: /dev/random or /dev/hwrng (string value) |
| #rng_dev_path=<None> |
| |
| # For qemu or KVM guests, set this option to specify a default machine type per |
| # host architecture. You can find a list of supported machine types in your |
| # environment by checking the output of the "virsh capabilities"command. The |
| # format of the value for this config option is host-arch=machine-type. For |
| # example: x86_64=machinetype1,armv7l=machinetype2 (list value) |
| #hw_machine_type=<None> |
| |
| # The data source used to the populate the host "serial" UUID exposed to guest |
| # in the virtual BIOS. (string value) |
| # Allowed values: none, os, hardware, auto |
| #sysinfo_serial=auto |
| |
| # A number of seconds to memory usage statistics period. Zero or negative value |
| # mean to disable memory usage statistics. (integer value) |
| #mem_stats_period_seconds=10 |
| |
| # List of uid targets and ranges. Syntax is guest-uid:host-uid:count. Maximum of 5
| # allowed. (list value) |
| #uid_maps = |
| |
| # List of gid targets and ranges. Syntax is guest-gid:host-gid:count. Maximum of 5
| # allowed. (list value) |
| #gid_maps = |
| |
| # In a realtime host context vCPUs for guest will run in that scheduling |
| # priority. Priority depends on the host kernel (usually 1-99) (integer value) |
| #realtime_scheduler_priority=1 |
| |
| # |
| # This is a performance event list which could be used as monitor. These events |
| # will be passed to libvirt domain xml while creating a new instances. |
| # Then event statistics data can be collected from libvirt. The minimum |
| # libvirt version is 2.0.0. For more information about `Performance monitoring |
| # events`, refer https://libvirt.org/formatdomain.html#elementsPerf . |
| # |
| # Possible values: |
| # * A string list. For example: ``enabled_perf_events = cmt, mbml, mbmt`` |
| # The supported events list can be found in |
| # https://libvirt.org/html/libvirt-libvirt-domain.html , |
| # which you may need to search key words ``VIR_PERF_PARAM_*`` |
| # (list value) |
| #enabled_perf_events = |
| |
| # |
| # VM Images format. |
| # |
| # If default is specified, then use_cow_images flag is used instead of this |
| # one. |
| # |
| # Related options: |
| # |
| # * virt.use_cow_images |
| # * images_volume_group |
| # (string value) |
| # Allowed values: raw, flat, qcow2, lvm, rbd, ploop, default |
| #images_type=default |
| |
| # |
| # LVM Volume Group that is used for VM images, when you specify images_type=lvm |
| # |
| # Related options: |
| # |
| # * images_type |
| # (string value) |
| #images_volume_group=<None> |
| |
| # |
| # Create sparse logical volumes (with virtualsize) if this flag is set to True. |
| # (boolean value) |
| #sparse_logical_volumes=false |
| |
| # The RADOS pool in which rbd volumes are stored (string value) |
| #images_rbd_pool=rbd |
| |
| # Path to the ceph configuration file to use (string value) |
| #images_rbd_ceph_conf = |
| |
| # |
| # Discard option for nova managed disks. |
| # |
| # Requires: |
| # |
| # * Libvirt >= 1.0.6 |
| # * Qemu >= 1.5 (raw format) |
| # * Qemu >= 1.6 (qcow2 format) |
| # (string value) |
| # Allowed values: ignore, unmap |
| #hw_disk_discard=<None> |
| |
| # DEPRECATED: Allows image information files to be stored in non-standard |
| # locations (string value) |
| # This option is deprecated for removal since 14.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: Image info files are no longer used by the image cache |
| #image_info_filename_pattern=$instances_path/$image_cache_subdirectory_name/%(image)s.info |
| |
| # Unused resized base images younger than this will not be removed (integer |
| # value) |
| #remove_unused_resized_minimum_age_seconds=3600 |
| |
| # DEPRECATED: Write a checksum for files in _base to disk (boolean value) |
| # This option is deprecated for removal since 14.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: The image cache no longer periodically calculates checksums of stored |
| # images. Data integrity can be checked at the block or filesystem level. |
| #checksum_base_images=false |
| |
| # DEPRECATED: How frequently to checksum base images (integer value) |
| # This option is deprecated for removal since 14.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: The image cache no longer periodically calculates checksums of stored |
| # images. Data integrity can be checked at the block or filesystem level. |
| #checksum_interval_seconds=3600 |
| |
| # |
| # Method used to wipe ephemeral disks when they are deleted. Only takes effect |
| # if LVM is set as backing storage. |
| # |
| # Possible values: |
| # |
| # * none - do not wipe deleted volumes |
| # * zero - overwrite volumes with zeroes |
| # * shred - overwrite volume repeatedly |
| # |
| # Related options: |
| # |
| # * images_type - must be set to ``lvm`` |
| # * volume_clear_size |
| # (string value) |
| # Allowed values: none, zero, shred |
| #volume_clear=zero |
| |
| # |
| # Size of area in MiB, counting from the beginning of the allocated volume, |
| # that will be cleared using method set in ``volume_clear`` option. |
| # |
| # Possible values: |
| # |
| # * 0 - clear whole volume |
| # * >0 - clear specified amount of MiB |
| # |
| # Related options: |
| # |
| # * images_type - must be set to ``lvm`` |
| # * volume_clear - must be set and the value must be different than ``none`` |
| # for this option to have any impact |
| # (integer value) |
| # Minimum value: 0 |
| #volume_clear_size=0 |
| |
| # |
| # Enable snapshot compression for ``qcow2`` images. |
| # |
| # Note: you can set ``snapshot_image_format`` to ``qcow2`` to force all |
| # snapshots to be in ``qcow2`` format, independently from their original image |
| # type. |
| # |
| # Related options: |
| # |
| # * snapshot_image_format |
| # (boolean value) |
| #snapshot_compression=false |
| |
| # Use virtio for bridge interfaces with KVM/QEMU (boolean value) |
| #use_virtio_for_bridges=true |
| use_virtio_for_bridges=true |
| |
| # |
| # Protocols listed here will be accessed directly from QEMU. |
| # |
| # If gluster is present in qemu_allowed_storage_drivers, glusterfs's backend |
| # will |
| # pass a disk configuration to QEMU. This allows QEMU to access the volume using |
| # libgfapi rather than mounting GlusterFS via fuse. |
| # |
| # Possible values: |
| # |
| # * [gluster] |
| # (list value) |
| #qemu_allowed_storage_drivers = |
| |
| # |
| # Use multipath connection of the iSCSI or FC volume |
| # |
| # Volumes can be connected in the LibVirt as multipath devices. This will |
| # provide high availability and fault tolerance. |
| # (boolean value) |
| # Deprecated group/name - [libvirt]/iscsi_use_multipath |
| #volume_use_multipath=false |
| |
| # |
| # Number of times to rediscover AoE target to find volume. |
| # |
| # Nova provides support for block storage attaching to hosts via AOE (ATA over |
| # Ethernet). This option allows the user to specify the maximum number of retry |
| # attempts that can be made to discover the AoE device. |
| # (integer value) |
| #num_aoe_discover_tries=3 |
| |
| # |
| # Absolute path to the directory where the glusterfs volume is mounted on the |
| # compute node. |
| # (string value) |
| #glusterfs_mount_point_base=$state_path/mnt |
| |
| # |
| # Number of times to scan iSCSI target to find volume. |
| # (integer value) |
| #num_iscsi_scan_tries=5 |
| |
| # |
| # The iSCSI transport iface to use to connect to target in case offload support |
| # is desired. |
| # |
| # Default format is of the form <transport_name>.<hwaddress> where |
| # <transport_name> is one of (be2iscsi, bnx2i, cxgb3i, cxgb4i, qla4xxx, ocs) and |
| # <hwaddress> is the MAC address of the interface and can be generated via the |
| # iscsiadm -m iface command. Do not confuse the iscsi_iface parameter to be |
| # provided here with the actual transport name. |
| # (string value) |
| # Deprecated group/name - [libvirt]/iscsi_transport |
| #iscsi_iface=<None> |
| |
| # |
| # Number of times to scan iSER target to find volume. |
| # |
| # iSER is a server network protocol that extends iSCSI protocol to use Remote |
| # Direct Memory Access (RDMA). This option allows the user to specify the |
| # maximum |
| # number of scan attempts that can be made to find iSER volume. |
| # (integer value) |
| #num_iser_scan_tries=5 |
| |
| # |
| # Use multipath connection of the iSER volume. |
| # |
| # iSER volumes can be connected as multipath devices. This will provide high |
| # availability and fault tolerance. |
| # (boolean value) |
| #iser_use_multipath=false |
| |
| # |
| # The RADOS client name for accessing rbd(RADOS Block Devices) volumes. |
| # |
| # Libvirt will refer to this user when connecting and authenticating with |
| # the Ceph RBD server. |
| # (string value) |
| #rbd_user=<None> |
| |
| # |
| # The libvirt UUID of the secret for the rbd_user volumes. |
| # (string value) |
| #rbd_secret_uuid=<None> |
| |
| # |
| # Directory where the NFS volume is mounted on the compute node. |
| # The default is 'mnt' directory of the location where nova's Python module |
| # is installed. |
| # |
| # NFS provides shared storage for the OpenStack Block Storage service. |
| # |
| # Possible values: |
| # |
| # * A string representing absolute path of mount point. |
| # (string value) |
| #nfs_mount_point_base=$state_path/mnt |
| |
| # |
| # Mount options passed to the NFS client. See section of the nfs man page |
| # for details. |
| # |
| # Mount options controls the way the filesystem is mounted and how the |
| # NFS client behaves when accessing files on this mount point. |
| # |
| # Possible values: |
| # |
| # * Any string representing mount options separated by commas. |
| # * Example string: vers=3,lookupcache=pos |
| # (string value) |
| {%- if controller.nfs_mount_options is defined %} |
| nfs_mount_options="{{ controller.nfs_mount_options }}" |
| {%- endif %} |
| |
| # |
| # Directory where the Quobyte volume is mounted on the compute node. |
| # |
| # Nova supports Quobyte volume driver that enables storing Block Storage |
| # service volumes on a Quobyte storage back end. This option specifies the
| # path of the directory where Quobyte volume is mounted. |
| # |
| # Possible values: |
| # |
| # * A string representing absolute path of mount point. |
| # (string value) |
| #quobyte_mount_point_base=$state_path/mnt |
| |
| # Path to a Quobyte Client configuration file. (string value) |
| #quobyte_client_cfg=<None> |
| |
| # |
| # Path or URL to Scality SOFS(Scale-Out File Server) configuration file. |
| # |
| # The Scality SOFS provides OpenStack users the option of storing their |
| # data on a high capacity, replicated, highly available Scality Ring object |
| # storage cluster. |
| # (string value) |
| #scality_sofs_config=<None> |
| |
| # |
| # Base dir where Scality SOFS shall be mounted. |
| # |
| # The Scality volume driver in Nova mounts SOFS and lets the hypervisor access |
| # the volumes. |
| # |
| # Possible values: |
| # |
| # * $state_path/scality where state_path is a config option that specifies |
| # the top-level directory for maintaining nova's state or Any string |
| # containing the full directory path. |
| # (string value) |
| #scality_sofs_mount_point=$state_path/scality |
| |
| # |
| # Directory where the SMBFS shares are mounted on the compute node. |
| # (string value) |
| #smbfs_mount_point_base=$state_path/mnt |
| |
| # |
| # Mount options passed to the SMBFS client. |
| # |
| # Provide SMBFS options as a single string containing all parameters. |
| # See mount.cifs man page for details. Note that the libvirt-qemu ``uid`` |
| # and ``gid`` must be specified. |
| # (string value) |
| #smbfs_mount_options = |
| |
| # |
| # libvirt's transport method for remote file operations. |
| # |
| # Because libvirt cannot use RPC to copy files over network to/from other |
| # compute nodes, other method must be used for: |
| # |
| # * creating directory on remote host |
| # * creating file on remote host |
| # * removing file from remote host |
| # * copying file to remote host |
| # (string value) |
| # Allowed values: ssh, rsync |
| #remote_filesystem_transport=ssh |
| |
| # |
| # Directory where the Virtuozzo Storage clusters are mounted on the compute |
| # node. |
| # |
| # This option defines non-standard mountpoint for Vzstorage cluster. |
| # |
| # Related options: |
| # |
| # * vzstorage_mount_* group of parameters |
| # (string value) |
| #vzstorage_mount_point_base=$state_path/mnt |
| |
| # |
| # Mount owner user name. |
| # |
| # This option defines the owner user of Vzstorage cluster mountpoint. |
| # |
| # Related options: |
| # |
| # * vzstorage_mount_* group of parameters |
| # (string value) |
| #vzstorage_mount_user=stack |
| |
| # |
| # Mount owner group name. |
| # |
| # This option defines the owner group of Vzstorage cluster mountpoint. |
| # |
| # Related options: |
| # |
| # * vzstorage_mount_* group of parameters |
| # (string value) |
| #vzstorage_mount_group=qemu |
| |
| # |
| # Mount access mode. |
| # |
| # This option defines the access bits of Vzstorage cluster mountpoint, |
| # in the format similar to one of chmod(1) utility, like this: 0770. |
| # It consists of one to four digits ranging from 0 to 7, with missing |
| # lead digits assumed to be 0's. |
| # |
| # Related options: |
| # |
| # * vzstorage_mount_* group of parameters |
| # (string value) |
| #vzstorage_mount_perms=0770 |
| |
| # |
| # Path to vzstorage client log. |
| # |
| # This option defines the log of cluster operations, |
| # it should include "%(cluster_name)s" template to separate |
| # logs from multiple shares. |
| # |
| # Related options: |
| # |
| # * vzstorage_mount_opts may include more detailed logging options. |
| # (string value) |
| #vzstorage_log_path=/var/log/pstorage/%(cluster_name)s/nova.log.gz |
| |
| # |
| # Path to the SSD cache file. |
| # |
| # You can attach an SSD drive to a client and configure the drive to store |
| # a local cache of frequently accessed data. By having a local cache on a |
| # client's SSD drive, you can increase the overall cluster performance by |
| # up to 10 or more times.
| # WARNING! There is a lot of SSD models which are not server grade and |
| # may lose an arbitrary set of data changes on power loss.
| # Such SSDs should not be used in Vstorage and are dangerous as may lead |
| # to data corruptions and inconsistencies. Please consult with the manual |
| # on which SSD models are known to be safe or verify it using |
| # vstorage-hwflush-check(1) utility. |
| # |
| # This option defines the path which should include "%(cluster_name)s" |
| # template to separate caches from multiple shares. |
| # |
| # Related options: |
| # |
| # * vzstorage_mount_opts may include more detailed cache options. |
| # (string value) |
| #vzstorage_cache_path=<None> |
| |
| # |
| # Extra mount options for pstorage-mount |
| # |
| # For full description of them, see |
| # https://static.openvz.org/vz-man/man1/pstorage-mount.1.gz.html |
| # Format is a python string representation of arguments list, like: |
| # "['-v', '-R', '500']" |
| # Shouldn't include -c, -l, -C, -u, -g and -m as those have |
| # explicit vzstorage_* options. |
| # |
| # Related options: |
| # |
| # * All other vzstorage_* options |
| # (list value) |
| #vzstorage_mount_opts = |
| |
| |
| [matchmaker_redis] |
| |
| # |
| # From oslo.messaging |
| # |
| |
| # DEPRECATED: Host to locate redis. (string value) |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| # Reason: Replaced by [DEFAULT]/transport_url |
| #host=127.0.0.1 |
| |
| # DEPRECATED: Use this port to connect to redis host. (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| # Reason: Replaced by [DEFAULT]/transport_url |
| #port=6379 |
| |
| # DEPRECATED: Password for Redis server (optional). (string value) |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| # Reason: Replaced by [DEFAULT]/transport_url |
| #password = |
| |
| # DEPRECATED: List of Redis Sentinel hosts (fault tolerance mode), e.g., |
| # [host:port, host1:port ... ] (list value) |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| # Reason: Replaced by [DEFAULT]/transport_url |
| #sentinel_hosts = |
| |
| # Redis replica set name. (string value) |
| #sentinel_group_name=oslo-messaging-zeromq |
| |
| # Time in ms to wait between connection attempts. (integer value) |
| #wait_timeout=2000 |
| |
| # Time in ms to wait before the transaction is killed. (integer value) |
| #check_timeout=20000 |
| |
| # Timeout in ms on blocking socket operations. (integer value) |
| #socket_timeout=10000 |
| |
| |
| [metrics] |
| # |
| # Configuration options for metrics |
| # |
| # Options under this group allow to adjust how values assigned to metrics are |
| # calculated. |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # When using metrics to weight the suitability of a host, you can use this |
| # option |
| # to change how the calculated weight influences the weight assigned to a host |
| # as |
| # follows: |
| # |
| # * >1.0: increases the effect of the metric on overall weight |
| # * 1.0: no change to the calculated weight |
| # * >0.0,<1.0: reduces the effect of the metric on overall weight |
| # * 0.0: the metric value is ignored, and the value of the |
| # 'weight_of_unavailable' option is returned instead |
| # * >-1.0,<0.0: the effect is reduced and reversed |
| # * -1.0: the effect is reversed |
| # * <-1.0: the effect is increased proportionally and reversed |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. |
| # |
| # Possible values: |
| # |
| # * An integer or float value, where the value corresponds to the multiplier |
| # ratio for this weigher. |
| # |
| # Related options: |
| # |
| # * weight_of_unavailable |
| # (floating point value) |
| #weight_multiplier=1.0 |
| |
| # |
| # This setting specifies the metrics to be weighed and the relative ratios for |
| # each metric. This should be a single string value, consisting of a series of |
| # one or more 'name=ratio' pairs, separated by commas, where 'name' is the name |
| # of the metric to be weighed, and 'ratio' is the relative weight for that |
| # metric. |
| # |
| # Note that if the ratio is set to 0, the metric value is ignored, and instead |
| # the weight will be set to the value of the 'weight_of_unavailable' option. |
| # |
| # As an example, let's consider the case where this option is set to: |
| # |
| # ``name1=1.0, name2=-1.3`` |
| # |
| # The final weight will be: |
| # |
| # ``(name1.value * 1.0) + (name2.value * -1.3)`` |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. |
| # |
| # Possible values: |
| # |
| # * A list of zero or more key/value pairs separated by commas, where the key is |
| # a string representing the name of a metric and the value is a numeric weight |
| # for that metric. If any value is set to 0, the value is ignored and the |
| # weight will be set to the value of the 'weight_of_unavailable' option. |
| # |
| # Related options: |
| # |
| # * weight_of_unavailable |
| # (list value) |
| #weight_setting = |
| |
| # |
| # This setting determines how any unavailable metrics are treated. If this |
| # option |
| # is set to True, any hosts for which a metric is unavailable will raise an |
| # exception, so it is recommended to also use the MetricFilter to filter out |
| # those hosts before weighing. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. |
| # |
| # Possible values: |
| # |
| # * True or False, where False ensures any metric being unavailable for a host |
| # will set the host weight to 'weight_of_unavailable'. |
| # |
| # Related options: |
| # |
| # * weight_of_unavailable |
| # (boolean value) |
| #required=true |
| |
| # |
| # When any of the following conditions are met, this value will be used in place |
| # of any actual metric value: |
| # |
| # * One of the metrics named in 'weight_setting' is not available for a host, |
| # and the value of 'required' is False |
| # * The ratio specified for a metric in 'weight_setting' is 0 |
| # * The 'weight_multiplier' option is set to 0 |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. |
| # |
| # Possible values: |
| # |
| # * An integer or float value, where the value corresponds to the multiplier |
| # ratio for this weigher. |
| # |
| # Related options: |
| # |
| # * weight_setting |
| # * required |
| # * weight_multiplier |
| # (floating point value) |
| #weight_of_unavailable=-10000.0 |
| |
| |
| [mks] |
| # |
| # Nova compute node uses WebMKS, a desktop sharing protocol to provide |
| # instance console access to VM's created by VMware hypervisors. |
| # |
| # Related options: |
| # Following options must be set to provide console access. |
| # * mksproxy_base_url |
| # * enabled |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Location of MKS web console proxy |
| # |
| # The URL in the response points to a WebMKS proxy which |
| # starts proxying between client and corresponding vCenter |
| # server where instance runs. In order to use the web based |
| # console access, WebMKS proxy should be installed and configured |
| # |
| # Possible values: |
| # |
| # * Must be a valid URL of the form: ``http://host:port/`` |
| # (string value) |
| #mksproxy_base_url=http://127.0.0.1:6090/ |
| |
| # |
| # Enables graphical console access for virtual machines. |
| # (boolean value) |
| #enabled=false |
| |
| |
| [neutron] |
| # |
| # Configuration options for neutron (network connectivity as a service). |
| |
| # |
| # From nova.conf |
| # |
| auth_type=v3password |
| project_domain_name = Default |
| user_domain_name = Default |
| auth_url = {{ controller.identity.get('protocol', 'http') }}://{{ controller.identity.host }}:35357/v3 |
| {% if pillar.neutron is defined and pillar.neutron.server is defined %} |
| password={{ pillar.neutron.server.identity.password }} |
| project_name={{ pillar.neutron.server.identity.tenant }} |
| username={{ pillar.neutron.server.identity.user }} |
| region_name= {{ pillar.neutron.server.identity.region }} |
| {%- else %} |
| password={{ controller.network.password }} |
| project_name={{ controller.network.tenant }} |
| username={{ controller.network.user }} |
| region_name= {{ controller.network.region }} |
| {%- endif %} |
| url={{ controller.network.get('protocol', 'http') }}://{{ controller.network.host }}:{{ controller.network.port }} |
| {%- if controller.network.get('protocol', 'http') == 'https' %} |
| cafile={{ controller.network.get('cacert_file', controller.cacert_file) }} |
| {%- endif %} |
| metadata_proxy_shared_secret={{ controller.metadata.password }} |
| service_metadata_proxy=True |
| # |
| # This option specifies the URL for connecting to Neutron. |
| # |
| # Possible values: |
| # |
| # * Any valid URL that points to the Neutron API service is appropriate here. |
| # This typically matches the URL returned for the 'network' service type |
| # from the Keystone service catalog. |
| # (uri value) |
| #url=http://127.0.0.1:9696 |
| |
| # |
| # Region name for connecting to Neutron in admin context. |
| # |
| # This option is used in multi-region setups. If there are two Neutron |
| # servers running in two regions in two different machines, then two |
| # services need to be created in Keystone with two different regions and |
| # associate corresponding endpoints to those services. When requests are made |
| # to Keystone, the Keystone service uses the region_name to determine the |
| # region the request is coming from. |
| # (string value) |
| #region_name=RegionOne |
| |
| # |
| # Specifies the name of an integration bridge interface used by OpenvSwitch. |
| # This option is used only if Neutron does not specify the OVS bridge name. |
| # |
| # Possible values: |
| # |
| # * Any string representing OVS bridge name. |
| # (string value) |
| #ovs_bridge=br-int |
| |
| # |
| # Integer value representing the number of seconds to wait before querying |
| # Neutron for extensions. After this number of seconds the next time Nova |
| # needs to create a resource in Neutron it will requery Neutron for the |
| # extensions that it has loaded. Setting value to 0 will refresh the |
| # extensions with no wait. |
| # (integer value) |
| # Minimum value: 0 |
| #extension_sync_interval=600 |
| |
| # |
| # When set to True, this option indicates that Neutron will be used to proxy |
| # metadata requests and resolve instance ids. Otherwise, the instance ID must be |
| # passed to the metadata request in the 'X-Instance-ID' header. |
| # |
| # Related options: |
| # |
| # * metadata_proxy_shared_secret |
| # (boolean value) |
| #service_metadata_proxy=false |
| |
| # |
| # This option holds the shared secret string used to validate proxy requests to |
| # Neutron metadata requests. In order to be used, the |
| # 'X-Metadata-Provider-Signature' header must be supplied in the request. |
| # |
| # Related options: |
| # |
| # * service_metadata_proxy |
| # (string value) |
| #metadata_proxy_shared_secret = |
| |
| # PEM encoded Certificate Authority to use when verifying HTTPs connections. |
| # (string value) |
| #cafile=<None> |
| |
| # PEM encoded client certificate cert file (string value) |
| #certfile=<None> |
| |
| # PEM encoded client certificate key file (string value) |
| #keyfile=<None> |
| |
| # Verify HTTPS connections. (boolean value) |
| #insecure=false |
| |
| # Timeout value for http requests (integer value) |
| #timeout=<None> |
| |
| # Authentication type to load (string value) |
| # Deprecated group/name - [neutron]/auth_plugin |
| #auth_type=<None> |
| |
| # Config Section from which to load plugin specific options (string value) |
| #auth_section=<None> |
| |
| # Authentication URL (string value) |
| #auth_url=<None> |
| |
| # Domain ID to scope to (string value) |
| #domain_id=<None> |
| |
| # Domain name to scope to (string value) |
| #domain_name=<None> |
| |
| # Project ID to scope to (string value) |
| #project_id=<None> |
| |
| # Project name to scope to (string value) |
| #project_name=<None> |
| |
| # Domain ID containing project (string value) |
| #project_domain_id=<None> |
| |
| # Domain name containing project (string value) |
| #project_domain_name=<None> |
| |
| # Trust ID (string value) |
| #trust_id=<None> |
| |
| # Optional domain ID to use with v3 and v2 parameters. It will be used for both |
| # the user and project domain in v3 and ignored in v2 authentication. (string |
| # value) |
| #default_domain_id=<None> |
| |
| # Optional domain name to use with v3 API and v2 parameters. It will be used for |
| # both the user and project domain in v3 and ignored in v2 authentication. |
| # (string value) |
| #default_domain_name=<None> |
| |
| # User ID (string value) |
| #user_id=<None> |
| |
| # Username (string value) |
| # Deprecated group/name - [neutron]/user-name |
| #username=<None> |
| |
| # User's domain id (string value) |
| #user_domain_id=<None> |
| |
| # User's domain name (string value) |
| #user_domain_name=<None> |
| |
| # User's password (string value) |
| #password=<None> |
| |
| # Tenant ID (string value) |
| #tenant_id=<None> |
| |
| # Tenant Name (string value) |
| #tenant_name=<None> |
| |
| |
| [notifications] |
| # |
| # Most of the actions in Nova which manipulate the system state generate |
| # notifications which are posted to the messaging component (e.g. RabbitMQ) and |
| # can be consumed by any service outside the OpenStack. More technical details |
| # at http://docs.openstack.org/developer/nova/notifications.html |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # If set, send compute.instance.update notifications on instance state |
| # changes. |
| # |
| # Please refer to https://wiki.openstack.org/wiki/SystemUsageData for |
| # additional information on notifications. |
| # |
| # Possible values: |
| # |
| # * None - no notifications |
| # * "vm_state" - notifications on VM state changes |
| # * "vm_and_task_state" - notifications on VM and task state changes |
| # (string value) |
| # Allowed values: <None>, vm_state, vm_and_task_state |
| # Deprecated group/name - [DEFAULT]/notify_on_state_change |
| #notify_on_state_change=<None> |
| {%- if controller.get('notification', {}).notify_on is defined %} |
| {%- for key, value in controller.notification.notify_on.items() %} |
| notify_on_{{ key }} = {{ value }} |
| {%- endfor %} |
| {%- endif %} |
| |
| # |
| # If enabled, send api.fault notifications on caught exceptions in the |
| # API service. |
| # (boolean value) |
| # Deprecated group/name - [DEFAULT]/notify_api_faults |
| #notify_on_api_faults=false |
| notify_on_api_faults=false |
| |
| # Default notification level for outgoing notifications. (string value) |
| # Allowed values: DEBUG, INFO, WARN, ERROR, CRITICAL |
| # Deprecated group/name - [DEFAULT]/default_notification_level |
| #default_level=INFO |
| |
| # |
| # Default publisher_id for outgoing notifications. If you consider routing |
| # notifications using different publisher, change this value accordingly. |
| # |
| # Possible values: |
| # |
| # * Defaults to the IPv4 address of this host, but it can be any valid |
| # oslo.messaging publisher_id |
| # |
| # Related options: |
| # |
| # * my_ip - IP address of this host |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/default_publisher_id |
| #default_publisher_id=$my_ip |
| |
| # |
| # Specifies which notification format shall be used by nova. |
| # |
| # The default value is fine for most deployments and rarely needs to be changed. |
| # This value can be set to 'versioned' once the infrastructure moves closer to |
| # consuming the newer format of notifications. After this occurs, this option |
| # will be removed (possibly in the "P" release). |
| # |
| # Possible values: |
| # * unversioned: Only the legacy unversioned notifications are emitted. |
| # * versioned: Only the new versioned notifications are emitted. |
| # * both: Both the legacy unversioned and the new versioned notifications are |
| # emitted. (Default) |
| # |
| # The list of versioned notifications is visible in |
| # http://docs.openstack.org/developer/nova/notifications.html |
| # (string value) |
| # Allowed values: unversioned, versioned, both |
| # Deprecated group/name - [DEFAULT]/notification_format |
| #notification_format=both |
| |
| |
| [osapi_v21] |
| |
| # |
| # From nova.conf |
| # |
| |
| # DEPRECATED: |
| # This option is a list of all of the v2.1 API extensions to never load. |
| # |
| # Possible values: |
| # |
| # * A list of strings, each being the alias of an extension that you do not |
| # wish to load. |
| # |
| # Related options: |
| # |
| # * enabled |
| # * extensions_whitelist |
| # (list value) |
| # This option is deprecated for removal since 12.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # API extensions are now part of the standard API. API extensions should be |
| # disabled using policy, rather than via these configuration options. |
| #extensions_blacklist = |
| |
| # DEPRECATED: |
| # This is a list of extensions. If it is empty, then *all* extensions except |
| # those specified in the extensions_blacklist option will be loaded. If it is |
| # not |
| # empty, then only those extensions in this list will be loaded, provided that |
| # they are also not in the extensions_blacklist option. |
| # |
| # Possible values: |
| # |
| # * A list of strings, each being the alias of an extension that you wish to |
| # load, or an empty list, which indicates that all extensions are to be run. |
| # |
| # Related options: |
| # |
| # * enabled |
| # * extensions_blacklist |
| # (list value) |
| # This option is deprecated for removal since 12.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # API extensions are now part of the standard API. API extensions should be |
| # disabled using policy, rather than via these configuration options. |
| #extensions_whitelist = |
| |
| # DEPRECATED: |
| # This option is a string representing a regular expression (regex) that matches |
| # the project_id as contained in URLs. If not set, it will match normal UUIDs |
| # created by keystone. |
| # |
| # Possible values: |
| # |
| # * A string representing any legal regular expression |
| # (string value) |
| # This option is deprecated for removal since 13.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # Recent versions of nova constrain project IDs to hexadecimal characters and |
| # dashes. If your installation uses IDs outside of this range, you should use |
| # this option to provide your own regex and give you time to migrate offending |
| # projects to valid IDs before the next release. |
| #project_id_regex=<None> |
| |
| |
| [oslo_concurrency] |
| |
| # |
| # From oslo.concurrency |
| # |
| |
| # Enables or disables inter-process locks. (boolean value) |
| # Deprecated group/name - [DEFAULT]/disable_process_locking |
| #disable_process_locking=false |
| |
| # Directory to use for lock files. For security, the specified directory should |
| # only be writable by the user running the processes that need locking. Defaults |
| # to environment variable OSLO_LOCK_PATH. If OSLO_LOCK_PATH is not set in the |
| # environment, use the Python tempfile.gettempdir function to find a suitable |
| # location. If external locks are used, a lock path must be set. (string value) |
| # Deprecated group/name - [DEFAULT]/lock_path |
| lock_path=/var/lib/nova/tmp |
| |
| |
| [oslo_messaging_amqp] |
| |
| # |
| # From oslo.messaging |
| # |
| |
| # Name for the AMQP container. must be globally unique. Defaults to a generated |
| # UUID (string value) |
| # Deprecated group/name - [amqp1]/container_name |
| #container_name=<None> |
| |
| # Timeout for inactive connections (in seconds) (integer value) |
| # Deprecated group/name - [amqp1]/idle_timeout |
| #idle_timeout=0 |
| |
| # Debug: dump AMQP frames to stdout (boolean value) |
| # Deprecated group/name - [amqp1]/trace |
| #trace=false |
| |
| # CA certificate PEM file used to verify the server's certificate (string value) |
| # Deprecated group/name - [amqp1]/ssl_ca_file |
| #ssl_ca_file = |
| |
| # Self-identifying certificate PEM file for client authentication (string value) |
| # Deprecated group/name - [amqp1]/ssl_cert_file |
| #ssl_cert_file = |
| |
| # Private key PEM file used to sign ssl_cert_file certificate (optional) (string |
| # value) |
| # Deprecated group/name - [amqp1]/ssl_key_file |
| #ssl_key_file = |
| |
| # Password for decrypting ssl_key_file (if encrypted) (string value) |
| # Deprecated group/name - [amqp1]/ssl_key_password |
| #ssl_key_password=<None> |
| |
| # DEPRECATED: Accept clients using either SSL or plain TCP (boolean value) |
| # Deprecated group/name - [amqp1]/allow_insecure_clients |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| # Reason: Not applicable - not a SSL server |
| #allow_insecure_clients=false |
| |
| # Space separated list of acceptable SASL mechanisms (string value) |
| # Deprecated group/name - [amqp1]/sasl_mechanisms |
| #sasl_mechanisms = |
| |
| # Path to directory that contains the SASL configuration (string value) |
| # Deprecated group/name - [amqp1]/sasl_config_dir |
| #sasl_config_dir = |
| |
| # Name of configuration file (without .conf suffix) (string value) |
| # Deprecated group/name - [amqp1]/sasl_config_name |
| #sasl_config_name = |
| |
| # User name for message broker authentication (string value) |
| # Deprecated group/name - [amqp1]/username |
| #username = |
| |
| # Password for message broker authentication (string value) |
| # Deprecated group/name - [amqp1]/password |
| #password = |
| |
| # Seconds to pause before attempting to re-connect. (integer value) |
| # Minimum value: 1 |
| #connection_retry_interval=1 |
| {%- if controller.message_queue.connection_retry_interval is defined %} |
| connection_retry_interval = {{ controller.message_queue.connection_retry_interval }} |
| {%- endif %} |
| |
| |
| # Increase the connection_retry_interval by this many seconds after each |
| # unsuccessful failover attempt. (integer value) |
| # Minimum value: 0 |
| #connection_retry_backoff=2 |
| {%- if controller.message_queue.connection_retry_backoff is defined %} |
| connection_retry_backoff = {{ controller.message_queue.connection_retry_backoff }} |
| {%- endif %} |
| |
| # Maximum limit for connection_retry_interval + connection_retry_backoff |
| # (integer value) |
| # Minimum value: 1 |
| #connection_retry_interval_max=30 |
| {%- if controller.message_queue.connection_retry_interval_max is defined %} |
| connection_retry_interval_max = {{ controller.message_queue.connection_retry_interval_max }} |
| {%- endif %} |
| |
| # Time to pause between re-connecting an AMQP 1.0 link that failed due to a |
| # recoverable error. (integer value) |
| # Minimum value: 1 |
| #link_retry_delay=10 |
| {%- if controller.message_queue.link_retry_delay is defined %} |
| link_retry_delay = {{ controller.message_queue.link_retry_delay }} |
| {%- endif %} |
| |
| # The maximum number of attempts to re-send a reply message which failed due to |
| # a recoverable error. (integer value) |
| # Minimum value: -1 |
| #default_reply_retry=0 |
| {%- if controller.message_queue.default_reply_retry is defined %} |
| default_reply_retry = {{ controller.message_queue.default_reply_retry }} |
| {%- endif %} |
| |
| # The deadline for an rpc reply message delivery. (integer value) |
| # Minimum value: 5 |
| #default_reply_timeout=30 |
| {%- if controller.message_queue.default_reply_timeout is defined %} |
| default_reply_timeout = {{ controller.message_queue.default_reply_timeout }} |
| {%- endif %} |
| |
| # The deadline for an rpc cast or call message delivery. Only used when caller |
| # does not provide a timeout expiry. (integer value) |
| # Minimum value: 5 |
| #default_send_timeout=30 |
| {%- if controller.message_queue.default_send_timeout is defined %} |
| default_send_timeout = {{ controller.message_queue.default_send_timeout }} |
| {%- endif %} |
| |
| # The deadline for a sent notification message delivery. Only used when caller |
| # does not provide a timeout expiry. (integer value) |
| # Minimum value: 5 |
| #default_notify_timeout=30 |
| {%- if controller.message_queue.default_notify_timeout is defined %} |
| default_notify_timeout = {{ controller.message_queue.default_notify_timeout }} |
| {%- endif %} |
| |
| # The duration to schedule a purge of idle sender links. Detach link after |
| # expiry. (integer value) |
| # Minimum value: 1 |
| #default_sender_link_timeout=600 |
| {%- if controller.message_queue.default_sender_link_timeout is defined %} |
| default_sender_link_timeout = {{ controller.message_queue.default_sender_link_timeout }} |
| {%- endif %} |
| |
| # Indicates the addressing mode used by the driver. |
| # Permitted values: |
| # 'legacy' - use legacy non-routable addressing |
| # 'routable' - use routable addresses |
| # 'dynamic' - use legacy addresses if the message bus does not support routing |
| # otherwise use routable addressing (string value) |
| #addressing_mode=dynamic |
| |
| # address prefix used when sending to a specific server (string value) |
| # Deprecated group/name - [amqp1]/server_request_prefix |
| #server_request_prefix=exclusive |
| |
| # address prefix used when broadcasting to all servers (string value) |
| # Deprecated group/name - [amqp1]/broadcast_prefix |
| #broadcast_prefix=broadcast |
| |
| # address prefix when sending to any server in group (string value) |
| # Deprecated group/name - [amqp1]/group_request_prefix |
| #group_request_prefix=unicast |
| |
| # Address prefix for all generated RPC addresses (string value) |
| #rpc_address_prefix=openstack.org/om/rpc |
| |
| # Address prefix for all generated Notification addresses (string value) |
| #notify_address_prefix=openstack.org/om/notify |
| |
| # Appended to the address prefix when sending a fanout message. Used by the |
| # message bus to identify fanout messages. (string value) |
| #multicast_address=multicast |
| |
| # Appended to the address prefix when sending to a particular RPC/Notification |
| # server. Used by the message bus to identify messages sent to a single |
| # destination. (string value) |
| #unicast_address=unicast |
| |
| # Appended to the address prefix when sending to a group of consumers. Used by |
| # the message bus to identify messages that should be delivered in a round-robin |
| # fashion across consumers. (string value) |
| #anycast_address=anycast |
| |
| # Exchange name used in notification addresses. |
| # Exchange name resolution precedence: |
| # Target.exchange if set |
| # else default_notification_exchange if set |
| # else control_exchange if set |
| # else 'notify' (string value) |
| #default_notification_exchange=<None> |
| |
| # Exchange name used in RPC addresses. |
| # Exchange name resolution precedence: |
| # Target.exchange if set |
| # else default_rpc_exchange if set |
| # else control_exchange if set |
| # else 'rpc' (string value) |
| #default_rpc_exchange=<None> |
| |
| # Window size for incoming RPC Reply messages. (integer value) |
| # Minimum value: 1 |
| #reply_link_credit=200 |
| |
| # Window size for incoming RPC Request messages (integer value) |
| # Minimum value: 1 |
| #rpc_server_credit=100 |
| |
| # Window size for incoming Notification messages (integer value) |
| # Minimum value: 1 |
| #notify_server_credit=100 |
| |
| # Send messages of this type pre-settled. |
| # Pre-settled messages will not receive acknowledgement |
| # from the peer. Note well: pre-settled messages may be |
| # silently discarded if the delivery fails. |
| # Permitted values: |
| # 'rpc-call' - send RPC Calls pre-settled |
| # 'rpc-reply'- send RPC Replies pre-settled |
| # 'rpc-cast' - Send RPC Casts pre-settled |
| # 'notify' - Send Notifications pre-settled |
| # (multi valued) |
| #pre_settled=rpc-cast |
| #pre_settled=rpc-reply |
| |
| |
| [oslo_messaging_kafka] |
| |
| # |
| # From oslo.messaging |
| # |
| |
| # DEPRECATED: Default Kafka broker Host (string value) |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| # Reason: Replaced by [DEFAULT]/transport_url |
| #kafka_default_host=localhost |
| |
| # DEPRECATED: Default Kafka broker Port (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| # Reason: Replaced by [DEFAULT]/transport_url |
| #kafka_default_port=9092 |
| |
| # Max fetch bytes of Kafka consumer (integer value) |
| #kafka_max_fetch_bytes=1048576 |
| |
| # Default timeout(s) for Kafka consumers (integer value) |
| #kafka_consumer_timeout=1.0 |
| |
| # Pool Size for Kafka Consumers (integer value) |
| #pool_size=10 |
| |
| # The pool size limit for connections expiration policy (integer value) |
| #conn_pool_min_size=2 |
| |
| # The time-to-live in sec of idle connections in the pool (integer value) |
| #conn_pool_ttl=1200 |
| |
| # Group id for Kafka consumer. Consumers in one group will coordinate message |
| # consumption (string value) |
| #consumer_group=oslo_messaging_consumer |
| |
| # Upper bound on the delay for KafkaProducer batching in seconds (floating point |
| # value) |
| #producer_batch_timeout=0.0 |
| |
| # Size of batch for the producer async send (integer value) |
| #producer_batch_size=16384 |
| |
| |
| [oslo_messaging_notifications] |
| |
| # |
| # From oslo.messaging |
| # |
| |
| # The Drivers(s) to handle sending notifications. Possible values are messaging, |
| # messagingv2, routing, log, test, noop (multi valued) |
| # Deprecated group/name - [DEFAULT]/notification_driver |
| #driver = |
| {%- if controller.notification is mapping %} |
| driver = {{ controller.notification.get('driver', 'messagingv2') }} |
| {%- if controller.notification.topics is defined %} |
| topics = {{ controller.notification.topics }} |
| {%- endif %} |
| {%- elif controller.notification %} |
| driver=messagingv2 |
| {%- endif %} |
| |
| # A URL representing the messaging driver to use for notifications. If not set, |
| # we fall back to the same configuration used for RPC. (string value) |
| # Deprecated group/name - [DEFAULT]/notification_transport_url |
| #transport_url=<None> |
| |
| # AMQP topic used for OpenStack notifications. (list value) |
| # Deprecated group/name - [rpc_notifier2]/topics |
| # Deprecated group/name - [DEFAULT]/notification_topics |
| #topics=notifications |
| |
| |
| [oslo_messaging_rabbit] |
| |
| # |
| # From oslo.messaging |
| # |
| # Use durable queues in AMQP. (boolean value) |
| # Deprecated group/name - [DEFAULT]/amqp_durable_queues |
| # Deprecated group/name - [DEFAULT]/rabbit_durable_queues |
| #amqp_durable_queues=false |
| |
| # Auto-delete queues in AMQP. (boolean value) |
| # Deprecated group/name - [DEFAULT]/amqp_auto_delete |
| #amqp_auto_delete=false |
| |
| # SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and |
| # SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some |
| # distributions. (string value) |
| # Deprecated group/name - [DEFAULT]/kombu_ssl_version |
| #kombu_ssl_version = |
| |
| # SSL key file (valid only if SSL enabled). (string value) |
| # Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile |
| #kombu_ssl_keyfile = |
| |
| # SSL cert file (valid only if SSL enabled). (string value) |
| # Deprecated group/name - [DEFAULT]/kombu_ssl_certfile |
| #kombu_ssl_certfile = |
| |
| # SSL certification authority file (valid only if SSL enabled). (string value) |
| # Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs |
| #kombu_ssl_ca_certs = |
| |
| # How long to wait before reconnecting in response to an AMQP consumer cancel |
| # notification. (floating point value) |
| # Deprecated group/name - [DEFAULT]/kombu_reconnect_delay |
| #kombu_reconnect_delay=1.0 |
| |
| # EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will not |
| # be used. This option may not be available in future versions. (string value) |
| #kombu_compression=<None> |
| |
| # How long to wait a missing client before abandoning to send it its replies. |
| # This value should not be longer than rpc_response_timeout. (integer value) |
| # Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout |
| #kombu_missing_consumer_retry_timeout=60 |
| |
| # Determines how the next RabbitMQ node is chosen in case the one we are |
| # currently connected to becomes unavailable. Takes effect only if more than one |
| # RabbitMQ node is provided in config. (string value) |
| # Allowed values: round-robin, shuffle |
| #kombu_failover_strategy=round-robin |
| |
| # DEPRECATED: The RabbitMQ broker address where a single node is used. (string |
| # value) |
| # Deprecated group/name - [DEFAULT]/rabbit_host |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| # Reason: Replaced by [DEFAULT]/transport_url |
| #rabbit_host=localhost |
| |
| # DEPRECATED: The RabbitMQ broker port where a single node is used. (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| # Deprecated group/name - [DEFAULT]/rabbit_port |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| # Reason: Replaced by [DEFAULT]/transport_url |
| #rabbit_port=5672 |
| |
| # DEPRECATED: RabbitMQ HA cluster host:port pairs. (list value) |
| # Deprecated group/name - [DEFAULT]/rabbit_hosts |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| # Reason: Replaced by [DEFAULT]/transport_url |
| #rabbit_hosts=$rabbit_host:$rabbit_port |
| |
| # Connect over SSL for RabbitMQ. (boolean value) |
| # Deprecated group/name - [DEFAULT]/rabbit_use_ssl |
| #rabbit_use_ssl=false |
| |
| # DEPRECATED: The RabbitMQ userid. (string value) |
| # Deprecated group/name - [DEFAULT]/rabbit_userid |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| # Reason: Replaced by [DEFAULT]/transport_url |
| #rabbit_userid=guest |
| |
| # DEPRECATED: The RabbitMQ password. (string value) |
| # Deprecated group/name - [DEFAULT]/rabbit_password |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| # Reason: Replaced by [DEFAULT]/transport_url |
| #rabbit_password=guest |
| |
| # The RabbitMQ login method. (string value) |
| # Allowed values: PLAIN, AMQPLAIN, RABBIT-CR-DEMO |
| # Deprecated group/name - [DEFAULT]/rabbit_login_method |
| #rabbit_login_method=AMQPLAIN |
| |
| # DEPRECATED: The RabbitMQ virtual host. (string value) |
| # Deprecated group/name - [DEFAULT]/rabbit_virtual_host |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| # Reason: Replaced by [DEFAULT]/transport_url |
| #rabbit_virtual_host=/ |
| |
| # How frequently to retry connecting with RabbitMQ. (integer value) |
| #rabbit_retry_interval=1 |
| {%- if controller.message_queue.rabbit_retry_interval is defined %} |
| rabbit_retry_interval = {{ controller.message_queue.rabbit_retry_interval }} |
| {%- endif %} |
| |
| |
| # How long to backoff for between retries when connecting to RabbitMQ. (integer |
| # value) |
| # Deprecated group/name - [DEFAULT]/rabbit_retry_backoff |
| #rabbit_retry_backoff=2 |
| {%- if controller.message_queue.rabbit_retry_backoff is defined %} |
| rabbit_retry_backoff = {{ controller.message_queue.rabbit_retry_backoff }} |
| {%- endif %} |
| |
| |
| # Maximum interval of RabbitMQ connection retries. Default is 30 seconds. |
| # (integer value) |
| #rabbit_interval_max=30 |
| {%- if controller.message_queue.rabbit_interval_max is defined %} |
| rabbit_interval_max = {{ controller.message_queue.rabbit_interval_max }} |
| {%- endif %} |
| |
| |
| # DEPRECATED: Maximum number of RabbitMQ connection retries. Default is 0 |
| # (infinite retry count). (integer value) |
| # Deprecated group/name - [DEFAULT]/rabbit_max_retries |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| #rabbit_max_retries=0 |
| {%- if controller.message_queue.rabbit_max_retries is defined %} |
| rabbit_max_retries = {{ controller.message_queue.rabbit_max_retries }} |
| {%- endif %} |
| |
| # Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this |
| # option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring |
| # is no longer controlled by the x-ha-policy argument when declaring a queue. If |
| # you just want to make sure that all queues (except those with auto-generated |
| # names) are mirrored across all nodes, run: "rabbitmqctl set_policy HA |
| # '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value) |
| # Deprecated group/name - [DEFAULT]/rabbit_ha_queues |
| #rabbit_ha_queues=false |
| |
| # Positive integer representing duration in seconds for queue TTL (x-expires). |
| # Queues which are unused for the duration of the TTL are automatically deleted. |
| # The parameter affects only reply and fanout queues. (integer value) |
| # Minimum value: 1 |
| #rabbit_transient_queues_ttl=1800 |
| {%- if controller.message_queue.rabbit_transient_queues_ttl is defined %} |
| rabbit_transient_queues_ttl = {{ controller.message_queue.rabbit_transient_queues_ttl }} |
| {%- endif %} |
| |
| # Specifies the number of messages to prefetch. Setting to zero allows unlimited |
| # messages. (integer value) |
| #rabbit_qos_prefetch_count=0 |
| {%- if controller.message_queue.rabbit_qos_prefetch_count is defined %} |
| rabbit_qos_prefetch_count = {{ controller.message_queue.rabbit_qos_prefetch_count }} |
| {%- endif %} |
| |
| # Number of seconds after which the Rabbit broker is considered down if |
| # heartbeat's keep-alive fails (0 disable the heartbeat). EXPERIMENTAL (integer |
| # value) |
| #heartbeat_timeout_threshold=60 |
| {%- if controller.message_queue.heartbeat_timeout_threshold is defined %} |
| heartbeat_timeout_threshold = {{ controller.message_queue.heartbeat_timeout_threshold }} |
| {%- endif %} |
| |
# How many times during the heartbeat_timeout_threshold we check the heartbeat.
# (integer value)
| #heartbeat_rate=2 |
| {%- if controller.message_queue.heartbeat_rate is defined %} |
| heartbeat_rate = {{ controller.message_queue.heartbeat_rate }} |
| {%- endif %} |
| |
| # Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value) |
| # Deprecated group/name - [DEFAULT]/fake_rabbit |
| #fake_rabbit=false |
| {%- if controller.message_queue.fake_rabbit is defined %} |
| fake_rabbit = {{ controller.message_queue.fake_rabbit }} |
| {%- endif %} |
| |
| # Maximum number of channels to allow (integer value) |
| #channel_max=<None> |
| {%- if controller.message_queue.channel_max is defined %} |
| channel_max = {{ controller.message_queue.channel_max }} |
| {%- endif %} |
| |
| # The maximum byte size for an AMQP frame (integer value) |
| #frame_max=<None> |
| {%- if controller.message_queue.frame_max is defined %} |
| frame_max = {{ controller.message_queue.frame_max }} |
| {%- endif %} |
| |
| # How often to send heartbeats for consumer's connections (integer value) |
| #heartbeat_interval=3 |
| {%- if controller.message_queue.heartbeat_interval is defined %} |
| heartbeat_interval = {{ controller.message_queue.heartbeat_interval }} |
| {%- endif %} |
| |
| # Enable SSL (boolean value) |
| #ssl=<None> |
| |
| # Arguments passed to ssl.wrap_socket (dict value) |
| #ssl_options=<None> |
| |
| # Set socket timeout in seconds for connection's socket (floating point value) |
| #socket_timeout=0.25 |
| {%- if controller.message_queue.socket_timeout is defined %} |
| socket_timeout = {{ controller.message_queue.socket_timeout }} |
| {%- endif %} |
| |
| # Set TCP_USER_TIMEOUT in seconds for connection's socket (floating point value) |
| #tcp_user_timeout=0.25 |
| {%- if controller.message_queue.tcp_user_timeout is defined %} |
| tcp_user_timeout = {{ controller.message_queue.tcp_user_timeout }} |
| {%- endif %} |
| |
| # Set delay for reconnection to some host which has connection error (floating |
| # point value) |
| #host_connection_reconnect_delay=0.25 |
| {%- if controller.message_queue.host_connection_reconnect_delay is defined %} |
| host_connection_reconnect_delay = {{ controller.message_queue.host_connection_reconnect_delay }} |
| {%- endif %} |
| |
| # Connection factory implementation (string value) |
| # Allowed values: new, single, read_write |
| #connection_factory=single |
| |
| # Maximum number of connections to keep queued. (integer value) |
| #pool_max_size=30 |
| {%- if controller.message_queue.pool_max_size is defined %} |
| pool_max_size = {{ controller.message_queue.pool_max_size }} |
| {%- endif %} |
| |
| # Maximum number of connections to create above `pool_max_size`. (integer value) |
| #pool_max_overflow=0 |
| {%- if controller.message_queue.pool_max_overflow is defined %} |
| pool_max_overflow = {{ controller.message_queue.pool_max_overflow }} |
| {%- endif %} |
| |
| # Default number of seconds to wait for a connections to available (integer |
| # value) |
| #pool_timeout=30 |
| {%- if controller.message_queue.pool_timeout is defined %} |
| pool_timeout = {{ controller.message_queue.pool_timeout }} |
| {%- endif %} |
| |
| # Lifetime of a connection (since creation) in seconds or None for no recycling. |
| # Expired connections are closed on acquire. (integer value) |
| #pool_recycle=600 |
| {%- if controller.message_queue.pool_recycle is defined %} |
| pool_recycle = {{ controller.message_queue.pool_recycle }} |
| {%- endif %} |
| |
| # Threshold at which inactive (since release) connections are considered stale |
| # in seconds or None for no staleness. Stale connections are closed on acquire. |
| # (integer value) |
| #pool_stale=60 |
| {%- if controller.message_queue.pool_stale is defined %} |
| pool_stale = {{ controller.message_queue.pool_stale }} |
| {%- endif %} |
| |
| # Default serialization mechanism for serializing/deserializing |
| # outgoing/incoming messages (string value) |
| # Allowed values: json, msgpack |
| #default_serializer_type=json |
| {%- if controller.message_queue.default_serializer_type is defined %} |
| default_serializer_type = {{ controller.message_queue.default_serializer_type }} |
| {%- endif %} |
| |
| # Persist notification messages. (boolean value) |
| #notification_persistence=false |
| {%- if controller.message_queue.notification_persistence is defined %} |
| notification_persistence = {{ controller.message_queue.notification_persistence }} |
| {%- endif %} |
| |
| # Exchange name for sending notifications (string value) |
| #default_notification_exchange=${control_exchange}_notification |
| {%- if controller.message_queue.default_notification_exchange is defined %} |
| default_notification_exchange = {{ controller.message_queue.default_notification_exchange }} |
| {%- endif %} |
| |
| # Max number of not acknowledged message which RabbitMQ can send to notification |
| # listener. (integer value) |
| #notification_listener_prefetch_count=100 |
| {%- if controller.message_queue.notification_listener_prefetch_count is defined %} |
| notification_listener_prefetch_count = {{ controller.message_queue.notification_listener_prefetch_count }} |
| {%- endif %} |
| |
| # Reconnecting retry count in case of connectivity problem during sending |
| # notification, -1 means infinite retry. (integer value) |
| #default_notification_retry_attempts=-1 |
| {%- if controller.message_queue.default_notification_retry_attempts is defined %} |
| default_notification_retry_attempts = {{ controller.message_queue.default_notification_retry_attempts }} |
| {%- endif %} |
| |
| # Reconnecting retry delay in case of connectivity problem during sending |
| # notification message (floating point value) |
| #notification_retry_delay=0.25 |
| {%- if controller.message_queue.notification_retry_delay is defined %} |
| notification_retry_delay = {{ controller.message_queue.notification_retry_delay }} |
| {%- endif %} |
| |
| # Time to live for rpc queues without consumers in seconds. (integer value) |
| #rpc_queue_expiration=60 |
| {%- if controller.message_queue.rpc_queue_expiration is defined %} |
| rpc_queue_expiration = {{ controller.message_queue.rpc_queue_expiration }} |
| {%- endif %} |
| |
| # Exchange name for sending RPC messages (string value) |
| #default_rpc_exchange=${control_exchange}_rpc |
| {%- if controller.message_queue.default_rpc_exchange is defined %} |
| default_rpc_exchange = {{ controller.message_queue.default_rpc_exchange }} |
| {%- endif %} |
| |
| # Exchange name for receiving RPC replies (string value) |
| #rpc_reply_exchange=${control_exchange}_rpc_reply |
| {%- if controller.message_queue.rpc_reply_exchange is defined %} |
| rpc_reply_exchange = {{ controller.message_queue.rpc_reply_exchange }} |
| {%- endif %} |
| |
| # Max number of not acknowledged message which RabbitMQ can send to rpc |
| # listener. (integer value) |
| #rpc_listener_prefetch_count=100 |
| {%- if controller.message_queue.rpc_listener_prefetch_count is defined %} |
| rpc_listener_prefetch_count = {{ controller.message_queue.rpc_listener_prefetch_count }} |
| {%- endif %} |
| |
| # Max number of not acknowledged message which RabbitMQ can send to rpc reply |
| # listener. (integer value) |
| #rpc_reply_listener_prefetch_count=100 |
| {%- if controller.message_queue.rpc_reply_listener_prefetch_count is defined %} |
| rpc_reply_listener_prefetch_count = {{ controller.message_queue.rpc_reply_listener_prefetch_count }} |
| {%- endif %} |
| |
| # Reconnecting retry count in case of connectivity problem during sending reply. |
| # -1 means infinite retry during rpc_timeout (integer value) |
| #rpc_reply_retry_attempts=-1 |
| {%- if controller.message_queue.rpc_reply_retry_attempts is defined %} |
| rpc_reply_retry_attempts = {{ controller.message_queue.rpc_reply_retry_attempts }} |
| {%- endif %} |
| |
| # Reconnecting retry delay in case of connectivity problem during sending reply. |
| # (floating point value) |
| #rpc_reply_retry_delay=0.25 |
| {%- if controller.message_queue.rpc_reply_retry_delay is defined %} |
| rpc_reply_retry_delay = {{ controller.message_queue.rpc_reply_retry_delay }} |
| {%- endif %} |
| |
# Reconnecting retry count in case of connectivity problem during sending RPC
# message, -1 means infinite retry. If the actual retry attempts value is not 0
# the rpc request could be processed more than one time (integer value)
| #default_rpc_retry_attempts=-1 |
| {%- if controller.message_queue.default_rpc_retry_attempts is defined %} |
| default_rpc_retry_attempts = {{ controller.message_queue.default_rpc_retry_attempts }} |
| {%- endif %} |
| |
| # Reconnecting retry delay in case of connectivity problem during sending RPC |
| # message (floating point value) |
| #rpc_retry_delay=0.25 |
| {%- if controller.message_queue.rpc_retry_delay is defined %} |
| rpc_retry_delay = {{ controller.message_queue.rpc_retry_delay }} |
| {%- endif %} |
| |
| {# rabbitmq ssl configuration #} |
| {%- if controller.message_queue.get('ssl',{}).get('enabled', False) %} |
| rabbit_use_ssl=true |
| |
| {%- if controller.message_queue.ssl.version is defined %} |
| kombu_ssl_version = {{ controller.message_queue.ssl.version }} |
| {%- elif salt['grains.get']('pythonversion') > [2,7,8] %} |
| kombu_ssl_version = TLSv1_2 |
| {%- endif %} |
| |
{%- if controller.message_queue.get('x509',{}).get('enabled', False) %}
kombu_ssl_ca_certs = {{ controller.message_queue.x509.ca_file }}

kombu_ssl_keyfile = {{ controller.message_queue.x509.key_file }}

kombu_ssl_certfile = {{ controller.message_queue.x509.cert_file }}

{%- else %}
kombu_ssl_ca_certs = {{ controller.message_queue.ssl.get('cacert_file', controller.cacert_file) }}
{%- endif %}
| |
| {%- endif %} |
| |
| [oslo_messaging_zmq] |
| |
| # |
| # From oslo.messaging |
| # |
| |
| # ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. |
| # The "host" option should point or resolve to this address. (string value) |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_bind_address |
| #rpc_zmq_bind_address=* |
| |
| # MatchMaker driver. (string value) |
| # Allowed values: redis, sentinel, dummy |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_matchmaker |
| #rpc_zmq_matchmaker=redis |
| |
| # Number of ZeroMQ contexts, defaults to 1. (integer value) |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_contexts |
| #rpc_zmq_contexts=1 |
| |
| # Maximum number of ingress messages to locally buffer per topic. Default is |
| # unlimited. (integer value) |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_topic_backlog |
| #rpc_zmq_topic_backlog=<None> |
| |
| # Directory for holding IPC sockets. (string value) |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_ipc_dir |
| #rpc_zmq_ipc_dir=/var/run/openstack |
| |
| # Name of this node. Must be a valid hostname, FQDN, or IP address. Must match |
| # "host" option, if running Nova. (string value) |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_host |
| #rpc_zmq_host=localhost |
| |
| # Number of seconds to wait before all pending messages will be sent after |
| # closing a socket. The default value of -1 specifies an infinite linger period. |
| # The value of 0 specifies no linger period. Pending messages shall be discarded |
| # immediately when the socket is closed. Positive values specify an upper bound |
| # for the linger period. (integer value) |
| # Deprecated group/name - [DEFAULT]/rpc_cast_timeout |
| #zmq_linger=-1 |
| |
| # The default number of seconds that poll should wait. Poll raises timeout |
| # exception when timeout expired. (integer value) |
| # Deprecated group/name - [DEFAULT]/rpc_poll_timeout |
| #rpc_poll_timeout=1 |
| {%- if controller.message_queue.rpc_poll_timeout is defined %} |
| rpc_poll_timeout = {{ controller.message_queue.rpc_poll_timeout }} |
| {%- endif %} |
| |
| |
| # Expiration timeout in seconds of a name service record about existing target ( |
| # < 0 means no timeout). (integer value) |
| # Deprecated group/name - [DEFAULT]/zmq_target_expire |
| #zmq_target_expire=300 |
| |
| # Update period in seconds of a name service record about existing target. |
| # (integer value) |
| # Deprecated group/name - [DEFAULT]/zmq_target_update |
| #zmq_target_update=180 |
| |
| # Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean |
| # value) |
| # Deprecated group/name - [DEFAULT]/use_pub_sub |
| #use_pub_sub=false |
| |
| # Use ROUTER remote proxy. (boolean value) |
| # Deprecated group/name - [DEFAULT]/use_router_proxy |
| #use_router_proxy=false |
| |
| # This option makes direct connections dynamic or static. It makes sense only |
| # with use_router_proxy=False which means to use direct connections for direct |
| # message types (ignored otherwise). (boolean value) |
| #use_dynamic_connections=false |
| |
| # How many additional connections to a host will be made for failover reasons. |
| # This option is actual only in dynamic connections mode. (integer value) |
| #zmq_failover_connections=2 |
| |
| # Minimal port number for random ports range. (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_min_port |
| #rpc_zmq_min_port=49153 |
| |
| # Maximal port number for random ports range. (integer value) |
| # Minimum value: 1 |
| # Maximum value: 65536 |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_max_port |
| #rpc_zmq_max_port=65536 |
| |
| # Number of retries to find free port number before fail with ZMQBindError. |
| # (integer value) |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_bind_port_retries |
| #rpc_zmq_bind_port_retries=100 |
| |
| # Default serialization mechanism for serializing/deserializing |
| # outgoing/incoming messages (string value) |
| # Allowed values: json, msgpack |
| # Deprecated group/name - [DEFAULT]/rpc_zmq_serialization |
| #rpc_zmq_serialization=json |
| |
| # This option configures round-robin mode in zmq socket. True means not keeping |
| # a queue when server side disconnects. False means to keep queue and messages |
| # even if server is disconnected, when the server appears we send all |
| # accumulated messages to it. (boolean value) |
| #zmq_immediate=true |
| |
| # Enable/disable TCP keepalive (KA) mechanism. The default value of -1 (or any |
| # other negative value) means to skip any overrides and leave it to OS default; |
| # 0 and 1 (or any other positive value) mean to disable and enable the option |
| # respectively. (integer value) |
| #zmq_tcp_keepalive=-1 |
| |
| # The duration between two keepalive transmissions in idle condition. The unit |
| # is platform dependent, for example, seconds in Linux, milliseconds in Windows |
| # etc. The default value of -1 (or any other negative value and 0) means to skip |
| # any overrides and leave it to OS default. (integer value) |
| #zmq_tcp_keepalive_idle=-1 |
| |
| # The number of retransmissions to be carried out before declaring that remote |
| # end is not available. The default value of -1 (or any other negative value and |
| # 0) means to skip any overrides and leave it to OS default. (integer value) |
| #zmq_tcp_keepalive_cnt=-1 |
| |
| # The duration between two successive keepalive retransmissions, if |
| # acknowledgement to the previous keepalive transmission is not received. The |
| # unit is platform dependent, for example, seconds in Linux, milliseconds in |
| # Windows etc. The default value of -1 (or any other negative value and 0) means |
| # to skip any overrides and leave it to OS default. (integer value) |
| #zmq_tcp_keepalive_intvl=-1 |
| |
| # Maximum number of (green) threads to work concurrently. (integer value) |
| #rpc_thread_pool_size=100 |
| rpc_thread_pool_size=70 |
| |
| # Expiration timeout in seconds of a sent/received message after which it is not |
| # tracked anymore by a client/server. (integer value) |
| #rpc_message_ttl=300 |
| |
| # Wait for message acknowledgements from receivers. This mechanism works only |
| # via proxy without PUB/SUB. (boolean value) |
| #rpc_use_acks=false |
| |
| # Number of seconds to wait for an ack from a cast/call. After each retry |
| # attempt this timeout is multiplied by some specified multiplier. (integer |
| # value) |
| #rpc_ack_timeout_base=15 |
| |
| # Number to multiply base ack timeout by after each retry attempt. (integer |
| # value) |
| #rpc_ack_timeout_multiplier=2 |
| |
| # Default number of message sending attempts in case of any problems occurred: |
| # positive value N means at most N retries, 0 means no retries, None or -1 (or |
| # any other negative values) mean to retry forever. This option is used only if |
| # acknowledgments are enabled. (integer value) |
| #rpc_retry_attempts=3 |
| |
| # List of publisher hosts SubConsumer can subscribe on. This option has higher |
| # priority then the default publishers list taken from the matchmaker. (list |
| # value) |
| #subscribe_on = |
| |
| |
| [oslo_middleware] |
| |
| # |
| # From oslo.middleware |
| # |
| |
| # The maximum body size for each request, in bytes. (integer value) |
| # Deprecated group/name - [DEFAULT]/osapi_max_request_body_size |
| # Deprecated group/name - [DEFAULT]/max_request_body_size |
| #max_request_body_size=114688 |
| |
| # DEPRECATED: The HTTP Header that will be used to determine what the original |
| # request protocol scheme was, even if it was hidden by a SSL termination proxy. |
| # (string value) |
| # This option is deprecated for removal. |
| # Its value may be silently ignored in the future. |
| #secure_proxy_ssl_header=X-Forwarded-Proto |
| |
| # Whether the application is behind a proxy or not. This determines if the |
| # middleware should parse the headers or not. (boolean value) |
| #enable_proxy_headers_parsing=false |
| enable_proxy_headers_parsing=True |
| |
| [oslo_policy] |
| |
| # |
| # From oslo.policy |
| # |
| |
| # The file that defines policies. (string value) |
| # Deprecated group/name - [DEFAULT]/policy_file |
| #policy_file=policy.json |
| |
| # Default rule. Enforced when a requested rule is not found. (string value) |
| # Deprecated group/name - [DEFAULT]/policy_default_rule |
| #policy_default_rule=default |
| |
| # Directories where policy configuration files are stored. They can be relative |
| # to any directory in the search path defined by the config_dir option, or |
| # absolute paths. The file defined by policy_file must exist for these |
| # directories to be searched. Missing or empty directories are ignored. (multi |
| # valued) |
| # Deprecated group/name - [DEFAULT]/policy_dirs |
| #policy_dirs=policy.d |
| |
| |
| [pci] |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # An alias for a PCI passthrough device requirement. |
| # |
| # This allows users to specify the alias in the extra_spec for a flavor, without |
| # needing to repeat all the PCI property requirements. |
| # |
| # Possible Values: |
| # |
| # * A list of JSON values which describe the aliases. For example: |
| # |
| # alias = { |
| # "name": "QuickAssist", |
| # "product_id": "0443", |
| # "vendor_id": "8086", |
| # "device_type": "type-PCI" |
| # } |
| # |
| # defines an alias for the Intel QuickAssist card. (multi valued). Valid key |
| # values are : |
| # |
| # * "name": Name of the PCI alias. |
| # * "product_id": Product ID of the device in hexadecimal. |
| # * "vendor_id": Vendor ID of the device in hexadecimal. |
| # * "device_type": Type of PCI device. Valid values are: "type-PCI", |
| # "type-PF" and "type-VF". |
| # (multi valued) |
| # Deprecated group/name - [DEFAULT]/pci_alias |
| #alias = |
| {%- if controller.get('pci', {}).get('alias', false) %} |
{#- .items() instead of the Python-2-only .iteritems(): renders under both Python 2 and 3 #}
{%- for alias_name, alias in controller.pci.alias.items() %}
| alias = {{ alias | json }} |
| {%- endfor %} |
| {%- endif %} |
| |
| # |
| # White list of PCI devices available to VMs. |
| # |
| # Possible values: |
| # |
| # * A JSON dictionary which describe a whitelisted PCI device. It should take |
| # the following format: |
| # |
| # ["vendor_id": "<id>",] ["product_id": "<id>",] |
| # ["address": "[[[[<domain>]:]<bus>]:][<slot>][.[<function>]]" | |
| # "devname": "<name>",] |
| # {"<tag>": "<tag_value>",} |
| # |
| # Where '[' indicates zero or one occurrences, '{' indicates zero or multiple |
| # occurrences, and '|' mutually exclusive options. Note that any missing |
| # fields are automatically wildcarded. |
| # |
| # Valid key values are : |
| # |
| # * "vendor_id": Vendor ID of the device in hexadecimal. |
| # * "product_id": Product ID of the device in hexadecimal. |
| # * "address": PCI address of the device. |
| # * "devname": Device name of the device (for e.g. interface name). Not all |
| # PCI devices have a name. |
| # * "<tag>": Additional <tag> and <tag_value> used for matching PCI devices. |
| # Supported <tag>: "physical_network". |
| # |
| # The address key supports traditional glob style and regular expression |
| # syntax. Valid examples are: |
| # |
| # passthrough_whitelist = {"devname":"eth0", |
| # "physical_network":"physnet"} |
| # passthrough_whitelist = {"address":"*:0a:00.*"} |
| # passthrough_whitelist = {"address":":0a:00.", |
| # "physical_network":"physnet1"} |
| # passthrough_whitelist = {"vendor_id":"1137", |
| # "product_id":"0071"} |
| # passthrough_whitelist = {"vendor_id":"1137", |
| # "product_id":"0071", |
| # "address": "0000:0a:00.1", |
| # "physical_network":"physnet1"} |
| # passthrough_whitelist = {"address":{"domain": ".*", |
| # "bus": "02", "slot": "01", |
| # "function": "[2-7]"}, |
| # "physical_network":"physnet1"} |
| # passthrough_whitelist = {"address":{"domain": ".*", |
| # "bus": "02", "slot": "0[1-2]", |
| # "function": ".*"}, |
| # "physical_network":"physnet1"} |
| # |
| # The following are invalid, as they specify mutually exclusive options: |
| # |
| # passthrough_whitelist = {"devname":"eth0", |
| # "physical_network":"physnet", |
| # "address":"*:0a:00.*"} |
| # |
| # * A JSON list of JSON dictionaries corresponding to the above format. For |
| # example: |
| # |
| # passthrough_whitelist = [{"product_id":"0001", "vendor_id":"8086"}, |
| # {"product_id":"0002", "vendor_id":"8086"}] |
| # (multi valued) |
| # Deprecated group/name - [DEFAULT]/pci_passthrough_whitelist |
| #passthrough_whitelist = |
| |
| |
| [placement] |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Region name of this node. This is used when picking the URL in the service |
| # catalog. |
| # |
| # Possible values: |
| # |
| # * Any string representing region name |
| # (string value) |
| os_region_name = {{ controller.identity.region }} |
| auth_type = password |
| user_domain_id = {{ controller.identity.get('domain', 'default') }} |
| project_domain_id = {{ controller.identity.get('domain', 'default') }} |
| project_name = {{ controller.identity.tenant }} |
| username = {{ controller.identity.user }} |
| password = {{ controller.identity.password }} |
| auth_url={{ controller.identity.get('protocol', 'http') }}://{{ controller.identity.host }}:35357/v3 |
| {%- if controller.identity.get('protocol', 'http') == 'https' %} |
| cafile={{ controller.identity.get('cacert_file', controller.cacert_file) }} |
| {%- endif %} |
| os_interface = internal |
| |
| # |
| # Endpoint interface for this node. This is used when picking the URL in the |
| # service catalog. |
| # (string value) |
| #os_interface=<None> |
| |
| # PEM encoded Certificate Authority to use when verifying HTTPs connections. |
| # (string value) |
| #cafile=<None> |
| |
| # PEM encoded client certificate cert file (string value) |
| #certfile=<None> |
| |
| # PEM encoded client certificate key file (string value) |
| #keyfile=<None> |
| |
| # Verify HTTPS connections. (boolean value) |
| #insecure=false |
| |
| # Timeout value for http requests (integer value) |
| #timeout=<None> |
| |
| # Authentication type to load (string value) |
| # Deprecated group/name - [placement]/auth_plugin |
| #auth_type=<None> |
| |
| # Config Section from which to load plugin specific options (string value) |
| #auth_section=<None> |
| |
| # Authentication URL (string value) |
| #auth_url=<None> |
| |
| # Domain ID to scope to (string value) |
| #domain_id=<None> |
| |
| # Domain name to scope to (string value) |
| #domain_name=<None> |
| |
| # Project ID to scope to (string value) |
| #project_id=<None> |
| |
| # Project name to scope to (string value) |
| #project_name=<None> |
| |
| # Domain ID containing project (string value) |
| #project_domain_id=<None> |
| |
| # Domain name containing project (string value) |
| #project_domain_name=<None> |
| |
| # Trust ID (string value) |
| #trust_id=<None> |
| |
| # Optional domain ID to use with v3 and v2 parameters. It will be used for both |
| # the user and project domain in v3 and ignored in v2 authentication. (string |
| # value) |
| #default_domain_id=<None> |
| |
| # Optional domain name to use with v3 API and v2 parameters. It will be used for |
| # both the user and project domain in v3 and ignored in v2 authentication. |
| # (string value) |
| #default_domain_name=<None> |
| |
| # User ID (string value) |
| #user_id=<None> |
| |
| # Username (string value) |
| # Deprecated group/name - [placement]/user-name |
| #username=<None> |
| |
| # User's domain id (string value) |
| #user_domain_id=<None> |
| |
| # User's domain name (string value) |
| #user_domain_name=<None> |
| |
| # User's password (string value) |
| #password=<None> |
| |
| # Tenant ID (string value) |
| #tenant_id=<None> |
| |
| # Tenant Name (string value) |
| #tenant_name=<None> |
| |
| |
| [quota] |
| # |
| # Quota options allow to manage quotas in openstack deployment. |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # The number of instances allowed per project. |
| # |
| # Possible Values |
| # |
| # * A positive integer or 0. |
| # * -1 to disable the quota. |
| # (integer value) |
| # Minimum value: -1 |
| # Deprecated group/name - [DEFAULT]/quota_instances |
| #instances=10 |
| |
| # |
| # The number of instance cores or vCPUs allowed per project. |
| # |
| # Possible values: |
| # |
| # * A positive integer or 0. |
| # * -1 to disable the quota. |
| # (integer value) |
| # Minimum value: -1 |
| # Deprecated group/name - [DEFAULT]/quota_cores |
| #cores=20 |
| |
| # |
| # The number of megabytes of instance RAM allowed per project. |
| # |
| # Possible values: |
| # |
| # * A positive integer or 0. |
| # * -1 to disable the quota. |
| # (integer value) |
| # Minimum value: -1 |
| # Deprecated group/name - [DEFAULT]/quota_ram |
| #ram=51200 |
| |
| # DEPRECATED: |
| # The number of floating IPs allowed per project. |
| # |
| # Floating IPs are not allocated to instances by default. Users need to select |
| # them from the pool configured by the OpenStack administrator to attach to |
| # their |
| # instances. |
| # |
| # Possible values: |
| # |
| # * A positive integer or 0. |
| # * -1 to disable the quota. |
| # (integer value) |
| # Minimum value: -1 |
| # Deprecated group/name - [DEFAULT]/quota_floating_ips |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #floating_ips=10 |
| |
| # DEPRECATED: |
| # The number of fixed IPs allowed per project. |
| # |
| # Unlike floating IPs, fixed IPs are allocated dynamically by the network |
| # component when instances boot up. This quota value should be at least the |
| # number of instances allowed |
| # |
| # Possible values: |
| # |
| # * A positive integer or 0. |
| # * -1 to disable the quota. |
| # (integer value) |
| # Minimum value: -1 |
| # Deprecated group/name - [DEFAULT]/quota_fixed_ips |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #fixed_ips=-1 |
| |
| # |
| # The number of metadata items allowed per instance. |
| # |
| # Users can associate metadata with an instance during instance creation. This |
| # metadata takes the form of key-value pairs. |
| # |
| # Possible values: |
| # |
| # * A positive integer or 0. |
| # * -1 to disable the quota. |
| # (integer value) |
| # Minimum value: -1 |
| # Deprecated group/name - [DEFAULT]/quota_metadata_items |
| #metadata_items=128 |
| |
| # |
| # The number of injected files allowed. |
| # |
| # File injection allows users to customize the personality of an instance by |
| # injecting data into it upon boot. Only text file injection is permitted: |
| # binary |
| # or ZIP files are not accepted. During file injection, any existing files that |
| # match specified files are renamed to include ``.bak`` extension appended with |
| # a |
| # timestamp. |
| # |
| # Possible values: |
| # |
| # * A positive integer or 0. |
| # * -1 to disable the quota. |
| # (integer value) |
| # Minimum value: -1 |
| # Deprecated group/name - [DEFAULT]/quota_injected_files |
| #injected_files=5 |
| |
| # |
| # The number of bytes allowed per injected file. |
| # |
| # Possible values: |
| # |
| # * A positive integer or 0. |
| # * -1 to disable the quota. |
| # (integer value) |
| # Minimum value: -1 |
| # Deprecated group/name - [DEFAULT]/quota_injected_file_content_bytes |
| #injected_file_content_bytes=10240 |
| |
| # |
| # The maximum allowed injected file path length. |
| # |
| # Possible values: |
| # |
| # * A positive integer or 0. |
| # * -1 to disable the quota. |
| # (integer value) |
| # Minimum value: -1 |
| # Deprecated group/name - [DEFAULT]/quota_injected_file_path_length |
| #injected_file_path_length=255 |
| |
| # DEPRECATED: |
| # The number of security groups per project. |
| # |
| # Possible values: |
| # |
| # * A positive integer or 0. |
| # * -1 to disable the quota. |
| # (integer value) |
| # Minimum value: -1 |
| # Deprecated group/name - [DEFAULT]/quota_security_groups |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #security_groups=10 |
| |
| # DEPRECATED: |
| # The number of security rules per security group. |
| # |
| # The associated rules in each security group control the traffic to instances |
| # in |
| # the group. |
| # |
| # Possible values: |
| # |
| # * A positive integer or 0. |
| # * -1 to disable the quota. |
| # (integer value) |
| # Minimum value: -1 |
| # Deprecated group/name - [DEFAULT]/quota_security_group_rules |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # nova-network is deprecated, as are any related configuration options. |
| #security_group_rules=20 |
| |
| # |
| # The maximum number of key pairs allowed per user. |
| # |
| # Users can create at least one key pair for each project and use the key pair |
| # for multiple instances that belong to that project. |
| # |
| # Possible values: |
| # |
| # * A positive integer or 0. |
| # * -1 to disable the quota. |
| # (integer value) |
| # Minimum value: -1 |
| # Deprecated group/name - [DEFAULT]/quota_key_pairs |
| #key_pairs=100 |
| |
| # |
# The maximum number of server groups per project.
| # |
| # Server groups are used to control the affinity and anti-affinity scheduling |
| # policy for a group of servers or instances. Reducing the quota will not affect |
| # any existing group, but new servers will not be allowed into groups that have |
| # become over quota. |
| # |
| # Possible values: |
| # |
| # * A positive integer or 0. |
| # * -1 to disable the quota. |
| # (integer value) |
| # Minimum value: -1 |
| # Deprecated group/name - [DEFAULT]/quota_server_groups |
| #server_groups=10 |
| |
| # |
| # The maximum number of servers per server group. |
| # |
| # Possible values: |
| # |
| # * A positive integer or 0. |
| # * -1 to disable the quota. |
| # (integer value) |
| # Minimum value: -1 |
| # Deprecated group/name - [DEFAULT]/quota_server_group_members |
| #server_group_members=10 |
| |
| # |
| # The number of seconds until a reservation expires. |
| # |
| # This quota represents the time period for invalidating quota reservations. |
| # (integer value) |
| # Deprecated group/name - [DEFAULT]/reservation_expire |
| #reservation_expire=86400 |
| reservation_expire=86400 |
| |
| # |
| # The count of reservations until usage is refreshed. |
| # |
| # This defaults to 0 (off) to avoid additional load but it is useful to turn on |
| # to help keep quota usage up-to-date and reduce the impact of out of sync usage |
| # issues. |
| # (integer value) |
| # Minimum value: 0 |
| # Deprecated group/name - [DEFAULT]/until_refresh |
| #until_refresh=0 |
| until_refresh=0 |
| |
| # |
| # The number of seconds between subsequent usage refreshes. |
| # |
| # This defaults to 0 (off) to avoid additional load but it is useful to turn on |
| # to help keep quota usage up-to-date and reduce the impact of out of sync usage |
| # issues. Note that quotas are not updated on a periodic task, they will update |
| # on a new reservation if max_age has passed since the last reservation. |
| # (integer value) |
| # Minimum value: 0 |
| # Deprecated group/name - [DEFAULT]/max_age |
| #max_age=0 |
| |
| # DEPRECATED: |
| # The quota enforcer driver. |
| # |
| # Provides abstraction for quota checks. Users can configure a specific |
| # driver to use for quota checks. |
| # |
| # Possible values: |
| # |
| # * nova.quota.DbQuotaDriver (default) or any string representing fully |
| # qualified class name. |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/quota_driver |
| # This option is deprecated for removal since 14.0.0. |
| # Its value may be silently ignored in the future. |
| #driver=nova.quota.DbQuotaDriver |
| |
| |
| [rdp] |
| # |
| # Options under this group enable and configure Remote Desktop Protocol ( |
| # RDP) related features. |
| # |
| # This group is only relevant to Hyper-V users. |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Enable Remote Desktop Protocol (RDP) related features. |
| # |
| # Hyper-V, unlike the majority of the hypervisors employed on Nova compute |
| # nodes, uses RDP instead of VNC and SPICE as a desktop sharing protocol to |
| # provide instance console access. This option enables RDP for graphical |
| # console access for virtual machines created by Hyper-V. |
| # |
| # **Note:** RDP should only be enabled on compute nodes that support the Hyper-V |
| # virtualization platform. |
| # |
| # Related options: |
| # |
| # * ``compute_driver``: Must be hyperv. |
| # |
| # (boolean value) |
| #enabled=false |
| |
| # |
| # The URL an end user would use to connect to the RDP HTML5 console proxy. |
| # The console proxy service is called with this token-embedded URL and |
| # establishes the connection to the proper instance. |
| # |
| # An RDP HTML5 console proxy service will need to be configured to listen on the |
| # address configured here. Typically the console proxy service would be run on a |
| # controller node. The localhost address used as default would only work in a |
| # single node environment i.e. devstack. |
| # |
| # An RDP HTML5 proxy allows a user to access via the web the text or graphical |
| # console of any Windows server or workstation using RDP. RDP HTML5 console |
| # proxy services include FreeRDP, wsgate. |
| # See https://github.com/FreeRDP/FreeRDP-WebConnect |
| # |
| # Possible values: |
| # |
| # * <scheme>://<ip-address>:<port-number>/ |
| # |
| # The scheme must be identical to the scheme configured for the RDP HTML5 |
| # console proxy service. |
| # |
| # The IP address must be identical to the address on which the RDP HTML5 |
| # console proxy service is listening. |
| # |
| # The port must be identical to the port on which the RDP HTML5 console proxy |
| # service is listening. |
| # |
| # Related options: |
| # |
| # * ``rdp.enabled``: Must be set to ``True`` for ``html5_proxy_base_url`` to be |
| # effective. |
| # (string value) |
| #html5_proxy_base_url=http://127.0.0.1:6083/ |
| |
| |
| [remote_debug] |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Debug host (IP or name) to connect to. This command line parameter is used |
| # when |
| # you want to connect to a nova service via a debugger running on a different |
| # host. |
| # |
| # Note that using the remote debug option changes how Nova uses the eventlet |
| # library to support async IO. This could result in failures that do not occur |
| # under normal operation. Use at your own risk. |
| # |
| # Possible Values: |
| # |
| # * IP address of a remote host as a command line parameter |
| # to a nova service. For Example: |
| # |
| # /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf |
| # --remote_debug-host <IP address where the debugger is running> |
| # (string value) |
| #host=<None> |
| |
| # |
| # Debug port to connect to. This command line parameter allows you to specify |
| # the port you want to use to connect to a nova service via a debugger running |
| # on different host. |
| # |
| # Note that using the remote debug option changes how Nova uses the eventlet |
| # library to support async IO. This could result in failures that do not occur |
| # under normal operation. Use at your own risk. |
| # |
| # Possible Values: |
| # |
| # * Port number you want to use as a command line parameter |
| # to a nova service. For Example: |
| # |
| # /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf |
| # --remote_debug-host <IP address where the debugger is running> |
#     --remote_debug-port <port it's listening on>.
| # (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| #port=<None> |
| |
| |
| [scheduler] |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # The scheduler host manager to use. |
| # |
| # The host manager manages the in-memory picture of the hosts that the scheduler |
| # uses. The options values are chosen from the entry points under the namespace |
| # 'nova.scheduler.host_manager' in 'setup.cfg'. |
| # (string value) |
| # Allowed values: host_manager, ironic_host_manager |
| # Deprecated group/name - [DEFAULT]/scheduler_host_manager |
| #host_manager=host_manager |
| host_manager=host_manager |
| |
| # |
| # The class of the driver used by the scheduler. |
| # |
| # The options are chosen from the entry points under the namespace |
| # 'nova.scheduler.driver' in 'setup.cfg'. |
| # |
| # Possible values: |
| # |
| # * A string, where the string corresponds to the class name of a scheduler |
| # driver. There are a number of options available: |
| # ** 'caching_scheduler', which aggressively caches the system state for better |
| # individual scheduler performance at the risk of more retries when running |
| # multiple schedulers |
| # ** 'chance_scheduler', which simply picks a host at random |
| # ** 'fake_scheduler', which is used for testing |
| # ** A custom scheduler driver. In this case, you will be responsible for |
| # creating and maintaining the entry point in your 'setup.cfg' file |
| # (string value) |
| # Allowed values: filter_scheduler, caching_scheduler, chance_scheduler, fake_scheduler |
| # Deprecated group/name - [DEFAULT]/scheduler_driver |
| #driver=filter_scheduler |
| driver=filter_scheduler |
| |
| # |
| # Periodic task interval. |
| # |
| # This value controls how often (in seconds) to run periodic tasks in the |
| # scheduler. The specific tasks that are run for each period are determined by |
| # the particular scheduler being used. |
| # |
| # If this is larger than the nova-service 'service_down_time' setting, Nova may |
| # report the scheduler service as down. This is because the scheduler driver is |
| # responsible for sending a heartbeat and it will only do that as often as this |
| # option allows. As each scheduler can work a little differently than the |
| # others, |
| # be sure to test this with your selected scheduler. |
| # |
| # Possible values: |
| # |
| # * An integer, where the integer corresponds to periodic task interval in |
| # seconds. 0 uses the default interval (60 seconds). A negative value disables |
| # periodic tasks. |
| # |
| # Related options: |
| # |
| # * ``nova-service service_down_time`` |
| # (integer value) |
| # Deprecated group/name - [DEFAULT]/scheduler_driver_task_period |
| #periodic_task_interval=60 |
| |
| # |
| # Maximum number of schedule attempts for a chosen host. |
| # |
| # This is the maximum number of attempts that will be made to schedule an |
| # instance before it is assumed that the failures aren't due to normal |
| # occasional |
| # race conflicts, but rather some other problem. When this is reached a |
| # MaxRetriesExceeded exception is raised, and the instance is set to an error |
| # state. |
| # |
| # Possible values: |
| # |
| # * A positive integer, where the integer corresponds to the max number of |
| # attempts that can be made when scheduling an instance. |
| # (integer value) |
| # Minimum value: 1 |
| # Deprecated group/name - [DEFAULT]/scheduler_max_attempts |
| #max_attempts=3 |
| max_attempts=3 |
| |
| # |
| # Periodic task interval. |
| # |
| # This value controls how often (in seconds) the scheduler should attempt |
| # to discover new hosts that have been added to cells. If negative (the |
| # default), no automatic discovery will occur. |
| # |
| # Small deployments may want this periodic task enabled, as surveying the |
| # cells for new hosts is likely to be lightweight enough to not cause undue |
# burden to the scheduler. However, larger clouds (and those that are not
| # adding hosts regularly) will likely want to disable this automatic |
| # behavior and instead use the `nova-manage cell_v2 discover_hosts` command |
| # when hosts have been added to a cell. |
| # (integer value) |
| # Minimum value: -1 |
| #discover_hosts_in_cells_interval=-1 |
| discover_hosts_in_cells_interval=300 |
| |
| |
| [serial_console] |
| # |
| # The serial console feature allows you to connect to a guest in case a |
| # graphical console like VNC, RDP or SPICE is not available. This is only |
| # currently supported for the libvirt, Ironic and hyper-v drivers. |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Enable the serial console feature. |
| # |
| # In order to use this feature, the service ``nova-serialproxy`` needs to run. |
| # This service is typically executed on the controller node. |
| # (boolean value) |
| #enabled=false |
| |
| # |
| # A range of TCP ports a guest can use for its backend. |
| # |
| # Each instance which gets created will use one port out of this range. If the |
# range is not big enough to provide another port for a new instance, this
| # instance won't get launched. |
| # |
| # Possible values: |
| # |
| # * Each string which passes the regex ``\d+:\d+`` For example ``10000:20000``. |
| # Be sure that the first port number is lower than the second port number |
| # and that both are in range from 0 to 65535. |
| # (string value) |
| #port_range=10000:20000 |
| |
| # |
| # The URL an end user would use to connect to the ``nova-serialproxy`` service. |
| # |
| # The ``nova-serialproxy`` service is called with this token enriched URL |
| # and establishes the connection to the proper instance. |
| # |
| # Related options: |
| # |
| # * The IP address must be identical to the address to which the |
| # ``nova-serialproxy`` service is listening (see option ``serialproxy_host`` |
| # in this section). |
| # * The port must be the same as in the option ``serialproxy_port`` of this |
| # section. |
| # * If you choose to use a secured websocket connection, then start this option |
| # with ``wss://`` instead of the unsecured ``ws://``. The options ``cert`` |
| # and ``key`` in the ``[DEFAULT]`` section have to be set for that. |
| # (uri value) |
| #base_url=ws://127.0.0.1:6083/ |
| |
| # |
| # The IP address to which proxy clients (like ``nova-serialproxy``) should |
| # connect to get the serial console of an instance. |
| # |
| # This is typically the IP address of the host of a ``nova-compute`` service. |
| # (string value) |
| #proxyclient_address=127.0.0.1 |
| |
| # |
| # The IP address which is used by the ``nova-serialproxy`` service to listen |
| # for incoming requests. |
| # |
| # The ``nova-serialproxy`` service listens on this IP address for incoming |
| # connection requests to instances which expose serial console. |
| # |
| # Related options: |
| # |
| # * Ensure that this is the same IP address which is defined in the option |
| # ``base_url`` of this section or use ``0.0.0.0`` to listen on all addresses. |
| # (string value) |
| #serialproxy_host=0.0.0.0 |
| |
| # |
| # The port number which is used by the ``nova-serialproxy`` service to listen |
| # for incoming requests. |
| # |
| # The ``nova-serialproxy`` service listens on this port number for incoming |
| # connection requests to instances which expose serial console. |
| # |
| # Related options: |
| # |
| # * Ensure that this is the same port number which is defined in the option |
| # ``base_url`` of this section. |
| # (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| #serialproxy_port=6083 |
| |
| |
| [service_user] |
| # |
| # Configuration options for service to service authentication using a service |
| # token. These options allow to send a service token along with the |
| # user's token when contacting external REST APIs. |
| |
| # |
| # From nova.conf |
| # |
| {%- if controller.get('service_user', {}).get('enabled', True) %} |
| send_service_user_token = True |
| auth_type = password |
| {%- if controller.service_user is defined %} |
| {%- set _data=controller.service_user %} |
| {%- else %} |
| {%- set _data=controller.identity %} |
| {%- endif %} |
| user_domain_id = {{ _data.get('domain', 'default') }} |
| project_domain_id = {{ _data.get('domain', 'default') }} |
| project_name = {{ _data.get('tenant', 'service') }} |
| username = {{ _data.get('user', 'nova') }} |
| password = {{ _data.password }} |
auth_url={{ controller.identity.get('protocol', 'http') }}://{{ controller.identity.host }}:5000
| {%- if controller.identity.get('protocol', 'http') == 'https' %} |
| cafile={{ controller.identity.get('cacert_file', controller.cacert_file) }} |
| {%- endif %} |
| {%- endif %} |
| |
| # |
| # When True, if sending a user token to an REST API, also send a service token. |
| # |
| # Nova often reuses the user token provided to the nova-api to talk to other |
| # REST APIs, such as Cinder and Neutron. It is possible that while the |
| # user token was valid when the request was made to Nova, the token may expire |
| # before it reaches the other service. To avoid any failures, and to |
| # make it clear it is Nova calling the service on the users behalf, we include |
| # a server token along with the user token. Should the user's token have |
| # expired, a valid service token ensures the REST API request will still be |
| # accepted by the keystone middleware. |
| # |
| # This feature is currently experimental, and as such is turned off by default |
| # while full testing and performance tuning of this feature is completed. |
| # (boolean value) |
| #send_service_user_token=false |
| |
| # PEM encoded Certificate Authority to use when verifying HTTPs connections. |
| # (string value) |
| #cafile=<None> |
| |
| # PEM encoded client certificate cert file (string value) |
| #certfile=<None> |
| |
| # PEM encoded client certificate key file (string value) |
| #keyfile=<None> |
| |
| # Verify HTTPS connections. (boolean value) |
| #insecure=false |
| |
| # Timeout value for http requests (integer value) |
| #timeout=<None> |
| |
| # Authentication type to load (string value) |
| # Deprecated group/name - [service_user]/auth_plugin |
| #auth_type=<None> |
| |
| # Config Section from which to load plugin specific options (string value) |
| #auth_section=<None> |
| |
| # Authentication URL (string value) |
| #auth_url=<None> |
| |
| # Domain ID to scope to (string value) |
| #domain_id=<None> |
| |
| # Domain name to scope to (string value) |
| #domain_name=<None> |
| |
| # Project ID to scope to (string value) |
| #project_id=<None> |
| |
| # Project name to scope to (string value) |
| #project_name=<None> |
| |
| # Domain ID containing project (string value) |
| #project_domain_id=<None> |
| |
| # Domain name containing project (string value) |
| #project_domain_name=<None> |
| |
| # Trust ID (string value) |
| #trust_id=<None> |
| |
| # Optional domain ID to use with v3 and v2 parameters. It will be used for both |
| # the user and project domain in v3 and ignored in v2 authentication. (string |
| # value) |
| #default_domain_id=<None> |
| |
| # Optional domain name to use with v3 API and v2 parameters. It will be used for |
| # both the user and project domain in v3 and ignored in v2 authentication. |
| # (string value) |
| #default_domain_name=<None> |
| |
| # User ID (string value) |
| #user_id=<None> |
| |
| # Username (string value) |
| # Deprecated group/name - [service_user]/user-name |
| #username=<None> |
| |
| # User's domain id (string value) |
| #user_domain_id=<None> |
| |
| # User's domain name (string value) |
| #user_domain_name=<None> |
| |
| # User's password (string value) |
| #password=<None> |
| |
| # Tenant ID (string value) |
| #tenant_id=<None> |
| |
| # Tenant Name (string value) |
| #tenant_name=<None> |
| |
| |
| [spice] |
| # |
| # SPICE console feature allows you to connect to a guest virtual machine. |
| # SPICE is a replacement for fairly limited VNC protocol. |
| # |
| # Following requirements must be met in order to use SPICE: |
| # |
| # * Virtualization driver must be libvirt |
| # * spice.enabled set to True |
| # * vnc.enabled set to False |
| # * update html5proxy_base_url |
| # * update server_proxyclient_address |
| enabled = false |
| html5proxy_base_url = {{ controller.vncproxy_url }}/spice_auto.html |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Enable SPICE related features. |
| # |
| # Related options: |
| # |
| # * VNC must be explicitly disabled to get access to the SPICE console. Set the |
| # enabled option to False in the [vnc] section to disable the VNC console. |
| # (boolean value) |
| #enabled=false |
| |
| # |
| # Enable the SPICE guest agent support on the instances. |
| # |
| # The Spice agent works with the Spice protocol to offer a better guest console |
| # experience. However, the Spice console can still be used without the Spice |
| # Agent. With the Spice agent installed the following features are enabled: |
| # |
| # * Copy & Paste of text and images between the guest and client machine |
| # * Automatic adjustment of resolution when the client screen changes - e.g. |
| # if you make the Spice console full screen the guest resolution will adjust |
| # to |
| # match it rather than letterboxing. |
| # * Better mouse integration - The mouse can be captured and released without |
| # needing to click inside the console or press keys to release it. The |
| # performance of mouse movement is also improved. |
| # (boolean value) |
| #agent_enabled=true |
| |
| # |
| # Location of the SPICE HTML5 console proxy. |
| # |
# End user would use this URL to connect to the ``nova-spicehtml5proxy``
| # service. This service will forward request to the console of an instance. |
| # |
| # In order to use SPICE console, the service ``nova-spicehtml5proxy`` should be |
| # running. This service is typically launched on the controller node. |
| # |
| # Possible values: |
| # |
| # * Must be a valid URL of the form: ``http://host:port/spice_auto.html`` |
| # where host is the node running ``nova-spicehtml5proxy`` and the port is |
| # typically 6082. Consider not using default value as it is not well defined |
| # for any real deployment. |
| # |
| # Related options: |
| # |
| # * This option depends on ``html5proxy_host`` and ``html5proxy_port`` options. |
| # The access URL returned by the compute node must have the host |
| # and port where the ``nova-spicehtml5proxy`` service is listening. |
| # (uri value) |
| #html5proxy_base_url=http://127.0.0.1:6082/spice_auto.html |
| |
| # |
| # The address where the SPICE server running on the instances should listen. |
| # |
| # Typically, the ``nova-spicehtml5proxy`` proxy client runs on the controller |
| # node and connects over the private network to this address on the compute |
| # node(s). |
| # |
| # Possible values: |
| # |
| # * IP address to listen on. |
| # (string value) |
| #server_listen=127.0.0.1 |
| |
| # |
| # The address used by ``nova-spicehtml5proxy`` client to connect to instance |
| # console. |
| # |
| # Typically, the ``nova-spicehtml5proxy`` proxy client runs on the |
| # controller node and connects over the private network to this address on the |
| # compute node(s). |
| # |
| # Possible values: |
| # |
| # * Any valid IP address on the compute node. |
| # |
| # Related options: |
| # |
| # * This option depends on the ``server_listen`` option. |
| # The proxy client must be able to access the address specified in |
| # ``server_listen`` using the value of this option. |
| # (string value) |
| #server_proxyclient_address=127.0.0.1 |
| |
| # |
| # A keyboard layout which is supported by the underlying hypervisor on this |
| # node. |
| # |
| # Possible values: |
| # * This is usually an 'IETF language tag' (default is 'en-us'). If you |
| # use QEMU as hypervisor, you should find the list of supported keyboard |
| # layouts at /usr/share/qemu/keymaps. |
| # (string value) |
| #keymap=en-us |
| |
| # |
| # IP address or a hostname on which the ``nova-spicehtml5proxy`` service |
| # listens for incoming requests. |
| # |
| # Related options: |
| # |
| # * This option depends on the ``html5proxy_base_url`` option. |
| # The ``nova-spicehtml5proxy`` service must be listening on a host that is |
| # accessible from the HTML5 client. |
| # (string value) |
| #html5proxy_host=0.0.0.0 |
| |
| # |
| # Port on which the ``nova-spicehtml5proxy`` service listens for incoming |
| # requests. |
| # |
| # Related options: |
| # |
| # * This option depends on the ``html5proxy_base_url`` option. |
| # The ``nova-spicehtml5proxy`` service must be listening on a port that is |
| # accessible from the HTML5 client. |
| # (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| #html5proxy_port=6082 |
| |
| |
| [ssl] |
| |
| # |
| # From nova.conf |
| # |
| |
| # CA certificate file to use to verify connecting clients. (string value) |
| # Deprecated group/name - [DEFAULT]/ssl_ca_file |
| {%- if controller.identity.get('protocol', 'http') == 'https' %} |
| ca_file={{ controller.glance.get('cacert_file', controller.cacert_file) }} |
| {%- endif %} |
| |
| # Certificate file to use when starting the server securely. (string value) |
| # Deprecated group/name - [DEFAULT]/ssl_cert_file |
| #cert_file=<None> |
| |
| # Private key file to use when starting the server securely. (string value) |
| # Deprecated group/name - [DEFAULT]/ssl_key_file |
| #key_file=<None> |
| |
| # SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and |
| # SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some |
| # distributions. (string value) |
| #version=<None> |
| |
| # Sets the list of available ciphers. value should be a string in the OpenSSL |
| # cipher list format. (string value) |
| #ciphers=<None> |
| |
| |
| [trusted_computing] |
| # |
| # Configuration options for enabling Trusted Platform Module. |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # The host to use as the attestation server. |
| # |
| # Cloud computing pools can involve thousands of compute nodes located at |
| # different geographical locations, making it difficult for cloud providers to |
| # identify a node's trustworthiness. When using the Trusted filter, users can |
| # request that their VMs only be placed on nodes that have been verified by the |
| # attestation server specified in this option. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. Also note that this setting |
| # only affects scheduling if the 'TrustedFilter' filter is enabled. |
| # |
| # Possible values: |
| # |
| # * A string representing the host name or IP address of the attestation server, |
| # or an empty string. |
| # |
| # Related options: |
| # |
| # * attestation_server_ca_file |
| # * attestation_port |
| # * attestation_api_url |
| # * attestation_auth_blob |
| # * attestation_auth_timeout |
| # * attestation_insecure_ssl |
| # (string value) |
| #attestation_server=<None> |
| |
| # |
| # The absolute path to the certificate to use for authentication when connecting |
| # to the attestation server. See the `attestation_server` help text for more |
| # information about host verification. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. Also note that this setting |
| # only affects scheduling if the 'TrustedFilter' filter is enabled. |
| # |
| # Possible values: |
| # |
| # * A string representing the path to the authentication certificate for the |
| # attestation server, or an empty string. |
| # |
| # Related options: |
| # |
| # * attestation_server |
| # * attestation_port |
| # * attestation_api_url |
| # * attestation_auth_blob |
| # * attestation_auth_timeout |
| # * attestation_insecure_ssl |
| # (string value) |
| #attestation_server_ca_file=<None> |
| |
| # |
| # The port to use when connecting to the attestation server. See the |
| # `attestation_server` help text for more information about host verification. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. Also note that this setting |
| # only affects scheduling if the 'TrustedFilter' filter is enabled. |
| # |
| # Related options: |
| # |
| # * attestation_server |
| # * attestation_server_ca_file |
| # * attestation_api_url |
| # * attestation_auth_blob |
| # * attestation_auth_timeout |
| # * attestation_insecure_ssl |
| # (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| #attestation_port=8443 |
| |
| # |
| # The URL on the attestation server to use. See the `attestation_server` help |
| # text for more information about host verification. |
| # |
# This value must be just the path portion of the full URL, as it will be
# joined to the host specified in the attestation_server option.
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. Also note that this setting |
| # only affects scheduling if the 'TrustedFilter' filter is enabled. |
| # |
| # Possible values: |
| # |
| # * A valid URL string of the attestation server, or an empty string. |
| # |
| # Related options: |
| # |
| # * attestation_server |
| # * attestation_server_ca_file |
| # * attestation_port |
| # * attestation_auth_blob |
| # * attestation_auth_timeout |
| # * attestation_insecure_ssl |
| # (string value) |
| #attestation_api_url=/OpenAttestationWebServices/V1.0 |
| |
| # |
| # Attestation servers require a specific blob that is used to authenticate. The |
| # content and format of the blob are determined by the particular attestation |
| # server being used. There is no default value; you must supply the value as |
| # specified by your attestation service. See the `attestation_server` help text |
| # for more information about host verification. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. Also note that this setting |
| # only affects scheduling if the 'TrustedFilter' filter is enabled. |
| # |
| # Possible values: |
| # |
| # * A string containing the specific blob required by the attestation server, or |
| # an empty string. |
| # |
| # Related options: |
| # |
| # * attestation_server |
| # * attestation_server_ca_file |
| # * attestation_port |
| # * attestation_api_url |
| # * attestation_auth_timeout |
| # * attestation_insecure_ssl |
| # (string value) |
| #attestation_auth_blob=<None> |
| |
| # |
| # This value controls how long a successful attestation is cached. Once this |
| # period has elapsed, a new attestation request will be made. See the |
| # `attestation_server` help text for more information about host verification. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. Also note that this setting |
| # only affects scheduling if the 'TrustedFilter' filter is enabled. |
| # |
| # Possible values: |
| # |
# * An integer value, corresponding to the timeout interval for attestations in
#   seconds. Any integer is valid, although setting this to zero or negative
#   values can greatly impact performance when using an attestation service.
| # |
| # Related options: |
| # |
| # * attestation_server |
| # * attestation_server_ca_file |
| # * attestation_port |
| # * attestation_api_url |
| # * attestation_auth_blob |
| # * attestation_insecure_ssl |
| # (integer value) |
| #attestation_auth_timeout=60 |
| |
| # |
| # When set to True, the SSL certificate verification is skipped for the |
| # attestation service. See the `attestation_server` help text for more |
| # information about host verification. |
| # |
| # This option is only used by the FilterScheduler and its subclasses; if you use |
| # a different scheduler, this option has no effect. Also note that this setting |
| # only affects scheduling if the 'TrustedFilter' filter is enabled. |
| # |
| # Related options: |
| # |
| # * attestation_server |
| # * attestation_server_ca_file |
| # * attestation_port |
| # * attestation_api_url |
| # * attestation_auth_blob |
| # * attestation_auth_timeout |
| # (boolean value) |
| #attestation_insecure_ssl=false |
| |
| |
| [upgrade_levels] |
| {%- if controller.upgrade_levels is defined %} |
| {%- for key, value in controller.upgrade_levels.iteritems() %} |
| {{ key }}={{ value }} |
| {%- endfor %} |
| {%- endif %} |
| # |
| # upgrade_levels options are used to set version cap for RPC |
| # messages sent between different nova services. |
| # |
| # By default all services send messages using the latest version |
| # they know about. |
| # |
| # The compute upgrade level is an important part of rolling upgrades |
| # where old and new nova-compute services run side by side. |
| # |
| # The other options can largely be ignored, and are only kept to |
| # help with a possible future backport issue. |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Compute RPC API version cap. |
| # |
| # By default, we always send messages using the most recent version |
| # the client knows about. |
| # |
| # Where you have old and new compute services running, you should set |
| # this to the lowest deployed version. This is to guarantee that all |
| # services never send messages that one of the compute nodes can't |
| # understand. Note that we only support upgrading from release N to |
| # release N+1. |
| # |
| # Set this option to "auto" if you want to let the compute RPC module |
| # automatically determine what version to use based on the service |
| # versions in the deployment. |
| # |
| # Possible values: |
| # |
| # * By default send the latest version the client knows about |
| # * 'auto': Automatically determines what version to use based on |
| # the service versions in the deployment. |
| # * A string representing a version number in the format 'N.N'; |
| # for example, possible values might be '1.12' or '2.0'. |
| # * An OpenStack release name, in lower case, such as 'mitaka' or |
| # 'liberty'. |
| # (string value) |
| #compute=<None> |
| |
| # Cells RPC API version cap (string value) |
| #cells=<None> |
| |
| # Intercell RPC API version cap (string value) |
| #intercell=<None> |
| |
| # Cert RPC API version cap (string value) |
| #cert=<None> |
| |
| # Scheduler RPC API version cap (string value) |
| #scheduler=<None> |
| |
| # Conductor RPC API version cap (string value) |
| #conductor=<None> |
| |
| # Console RPC API version cap (string value) |
| #console=<None> |
| |
| # Consoleauth RPC API version cap (string value) |
| #consoleauth=<None> |
| |
| # Network RPC API version cap (string value) |
| #network=<None> |
| |
| # Base API RPC API version cap (string value) |
| #baseapi=<None> |
| |
| |
| [vendordata_dynamic_auth] |
| # |
| # Options within this group control the authentication of the vendordata |
| # subsystem of the metadata API server (and config drive) with external systems. |
| |
| # |
| # From nova.conf |
| # |
| |
| # PEM encoded Certificate Authority to use when verifying HTTPs connections. |
| # (string value) |
| #cafile=<None> |
| |
| # PEM encoded client certificate cert file (string value) |
| #certfile=<None> |
| |
| # PEM encoded client certificate key file (string value) |
| #keyfile=<None> |
| |
| # Verify HTTPS connections. (boolean value) |
| #insecure=false |
| |
| # Timeout value for http requests (integer value) |
| #timeout=<None> |
| |
| # Authentication type to load (string value) |
| # Deprecated group/name - [vendordata_dynamic_auth]/auth_plugin |
| #auth_type=<None> |
| |
| # Config Section from which to load plugin specific options (string value) |
| #auth_section=<None> |
| |
| # Authentication URL (string value) |
| #auth_url=<None> |
| |
| # Domain ID to scope to (string value) |
| #domain_id=<None> |
| |
| # Domain name to scope to (string value) |
| #domain_name=<None> |
| |
| # Project ID to scope to (string value) |
| #project_id=<None> |
| |
| # Project name to scope to (string value) |
| #project_name=<None> |
| |
| # Domain ID containing project (string value) |
| #project_domain_id=<None> |
| |
| # Domain name containing project (string value) |
| #project_domain_name=<None> |
| |
| # Trust ID (string value) |
| #trust_id=<None> |
| |
| # Optional domain ID to use with v3 and v2 parameters. It will be used for both |
| # the user and project domain in v3 and ignored in v2 authentication. (string |
| # value) |
| #default_domain_id=<None> |
| |
| # Optional domain name to use with v3 API and v2 parameters. It will be used for |
| # both the user and project domain in v3 and ignored in v2 authentication. |
| # (string value) |
| #default_domain_name=<None> |
| |
| # User ID (string value) |
| #user_id=<None> |
| |
| # Username (string value) |
| # Deprecated group/name - [vendordata_dynamic_auth]/user-name |
| #username=<None> |
| |
| # User's domain id (string value) |
| #user_domain_id=<None> |
| |
| # User's domain name (string value) |
| #user_domain_name=<None> |
| |
| # User's password (string value) |
| #password=<None> |
| |
| # Tenant ID (string value) |
| #tenant_id=<None> |
| |
| # Tenant Name (string value) |
| #tenant_name=<None> |
| |
| {%- set compute_driver = controller.get('compute_driver', 'libvirt.LibvirtDriver') %} |
| {%- if compute_driver in compute_driver_mapping.keys() %} |
| {%- set _data = controller.get(compute_driver_mapping[compute_driver]) %} |
| {%- include "nova/files/pike/compute/_" + compute_driver_mapping[compute_driver] + ".conf" %} |
| {%- endif %} |
| |
| [vnc] |
| # |
| # Virtual Network Computer (VNC) can be used to provide remote desktop |
| # console access to instances for tenants and/or administrators. |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Enable VNC related features. |
| # |
| # Guests will get created with graphical devices to support this. Clients |
| # (for example Horizon) can then establish a VNC connection to the guest. |
| # (boolean value) |
| # Deprecated group/name - [DEFAULT]/vnc_enabled |
| #enabled=true |
| enabled=true |
| |
| # |
| # Keymap for VNC. |
| # |
| # The keyboard mapping (keymap) determines which keyboard layout a VNC |
| # session should use by default. |
| # |
| # Possible values: |
| # |
| # * A keyboard layout which is supported by the underlying hypervisor on |
| # this node. This is usually an 'IETF language tag' (for example |
| # 'en-us'). If you use QEMU as hypervisor, you should find the list |
| # of supported keyboard layouts at ``/usr/share/qemu/keymaps``. |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/vnc_keymap |
| #keymap=en-us |
| |
| # |
| # The IP address or hostname on which an instance should listen to for |
| # incoming VNC connection requests on this node. |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/vncserver_listen |
| #vncserver_listen=127.0.0.1 |
| |
| # |
| # Private, internal IP address or hostname of VNC console proxy. |
| # |
| # The VNC proxy is an OpenStack component that enables compute service |
| # users to access their instances through VNC clients. |
| # |
| # This option sets the private address to which proxy clients, such as |
| # ``nova-xvpvncproxy``, should connect to. |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/vncserver_proxyclient_address |
| #vncserver_proxyclient_address=127.0.0.1 |
| |
| # |
| # Public address of noVNC VNC console proxy. |
| # |
| # The VNC proxy is an OpenStack component that enables compute service |
| # users to access their instances through VNC clients. noVNC provides |
| # VNC support through a websocket-based client. |
| # |
| # This option sets the public base URL to which client systems will |
| # connect. noVNC clients can use this address to connect to the noVNC |
| # instance and, by extension, the VNC sessions. |
| # |
| # Related options: |
| # |
| # * novncproxy_host |
| # * novncproxy_port |
| # (uri value) |
| # Deprecated group/name - [DEFAULT]/novncproxy_base_url |
| #novncproxy_base_url=http://127.0.0.1:6080/vnc_auto.html |
| enabled = true |
| novncproxy_host = {{ controller.bind.get('novncproxy_address', '0.0.0.0') }} |
| novncproxy_base_url = {{ controller.vncproxy_url }}/vnc_auto.html |
| novncproxy_port={{ controller.bind.get('vncproxy_port', '6080') }} |
| {%- if pillar.nova.compute is defined %} |
| vncserver_listen={{ controller.bind.private_address }} |
| vncserver_proxyclient_address={{ controller.bind.private_address }} |
| {%- else %} |
| vncserver_listen={{ controller.bind.get('novncproxy_address', '0.0.0.0') }} |
| {%- endif %} |
| keymap = {{ controller.get('vnc_keymap', 'en-us') }} |
| |
| # |
| # IP address or hostname that the XVP VNC console proxy should bind to. |
| # |
| # The VNC proxy is an OpenStack component that enables compute service |
| # users to access their instances through VNC clients. Xen provides |
| # the Xenserver VNC Proxy, or XVP, as an alternative to the |
| # websocket-based noVNC proxy used by Libvirt. In contrast to noVNC, |
| # XVP clients are Java-based. |
| # |
| # This option sets the private address to which the XVP VNC console proxy |
| # service should bind to. |
| # |
| # Related options: |
| # |
| # * xvpvncproxy_port |
| # * xvpvncproxy_base_url |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/xvpvncproxy_host |
| #xvpvncproxy_host=0.0.0.0 |
| |
| # |
| # Port that the XVP VNC console proxy should bind to. |
| # |
| # The VNC proxy is an OpenStack component that enables compute service |
| # users to access their instances through VNC clients. Xen provides |
| # the Xenserver VNC Proxy, or XVP, as an alternative to the |
| # websocket-based noVNC proxy used by Libvirt. In contrast to noVNC, |
| # XVP clients are Java-based. |
| # |
| # This option sets the private port to which the XVP VNC console proxy |
| # service should bind to. |
| # |
| # Related options: |
| # |
| # * xvpvncproxy_host |
| # * xvpvncproxy_base_url |
| # (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| # Deprecated group/name - [DEFAULT]/xvpvncproxy_port |
| #xvpvncproxy_port=6081 |
| |
| # |
| # Public URL address of XVP VNC console proxy. |
| # |
| # The VNC proxy is an OpenStack component that enables compute service |
| # users to access their instances through VNC clients. Xen provides |
| # the Xenserver VNC Proxy, or XVP, as an alternative to the |
| # websocket-based noVNC proxy used by Libvirt. In contrast to noVNC, |
| # XVP clients are Java-based. |
| # |
| # This option sets the public base URL to which client systems will |
| # connect. XVP clients can use this address to connect to the XVP |
| # instance and, by extension, the VNC sessions. |
| # |
| # Related options: |
| # |
| # * xvpvncproxy_host |
| # * xvpvncproxy_port |
| # (uri value) |
| # Deprecated group/name - [DEFAULT]/xvpvncproxy_base_url |
| #xvpvncproxy_base_url=http://127.0.0.1:6081/console |
| |
| # |
| # IP address that the noVNC console proxy should bind to. |
| # |
| # The VNC proxy is an OpenStack component that enables compute service |
| # users to access their instances through VNC clients. noVNC provides |
| # VNC support through a websocket-based client. |
| # |
| # This option sets the private address to which the noVNC console proxy |
| # service should bind to. |
| # |
| # Related options: |
| # |
| # * novncproxy_port |
| # * novncproxy_base_url |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/novncproxy_host |
| #novncproxy_host=0.0.0.0 |
| |
| # |
| # Port that the noVNC console proxy should bind to. |
| # |
| # The VNC proxy is an OpenStack component that enables compute service |
| # users to access their instances through VNC clients. noVNC provides |
| # VNC support through a websocket-based client. |
| # |
| # This option sets the private port to which the noVNC console proxy |
| # service should bind to. |
| # |
| # Related options: |
| # |
| # * novncproxy_host |
| # * novncproxy_base_url |
| # (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| # Deprecated group/name - [DEFAULT]/novncproxy_port |
| #novncproxy_port=6080 |
| |
| |
| [workarounds] |
| # |
| # A collection of workarounds used to mitigate bugs or issues found in system |
| # tools (e.g. Libvirt or QEMU) or Nova itself under certain conditions. These |
| # should only be enabled in exceptional circumstances. All options are linked |
| # against bug IDs, where more information on the issue can be found. |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Use sudo instead of rootwrap. |
| # |
| # Allow fallback to sudo for performance reasons. |
| # |
| # For more information, refer to the bug report: |
| # |
| # https://bugs.launchpad.net/nova/+bug/1415106 |
| # |
| # Possible values: |
| # |
| # * True: Use sudo instead of rootwrap |
| # * False: Use rootwrap as usual |
| # |
| # Interdependencies to other options: |
| # |
| # * Any options that affect 'rootwrap' will be ignored. |
| # (boolean value) |
| #disable_rootwrap=false |
| |
| # |
| # Disable live snapshots when using the libvirt driver. |
| # |
| # Live snapshots allow the snapshot of the disk to happen without an |
| # interruption to the guest, using coordination with a guest agent to |
| # quiesce the filesystem. |
| # |
| # When using libvirt 1.2.2 live snapshots fail intermittently under load |
| # (likely related to concurrent libvirt/qemu operations). This config |
| # option provides a mechanism to disable live snapshot, in favor of cold |
| # snapshot, while this is resolved. Cold snapshot causes an instance |
| # outage while the guest is going through the snapshotting process. |
| # |
| # For more information, refer to the bug report: |
| # |
| # https://bugs.launchpad.net/nova/+bug/1334398 |
| # |
| # Possible values: |
| # |
| # * True: Live snapshot is disabled when using libvirt |
| # * False: Live snapshots are always used when snapshotting (as long as |
| # there is a new enough libvirt and the backend storage supports it) |
| # (boolean value) |
| #disable_libvirt_livesnapshot=true |
| |
| # |
| # Enable handling of events emitted from compute drivers. |
| # |
| # Many compute drivers emit lifecycle events, which are events that occur when, |
| # for example, an instance is starting or stopping. If the instance is going |
| # through task state changes due to an API operation, like resize, the events |
| # are ignored. |
| # |
| # This is an advanced feature which allows the hypervisor to signal to the |
| # compute service that an unexpected state change has occurred in an instance |
| # and that the instance can be shutdown automatically. Unfortunately, this can |
| # race in some conditions, for example in reboot operations or when the compute |
| # service or when host is rebooted (planned or due to an outage). If such races |
| # are common, then it is advisable to disable this feature. |
| # |
| # Care should be taken when this feature is disabled and |
| # 'sync_power_state_interval' is set to a negative value. In this case, any |
| # instances that get out of sync between the hypervisor and the Nova database |
| # will have to be synchronized manually. |
| # |
| # For more information, refer to the bug report: |
| # |
| # https://bugs.launchpad.net/bugs/1444630 |
| # |
| # Interdependencies to other options: |
| # |
| # * If ``sync_power_state_interval`` is negative and this feature is disabled, |
| # then instances that get out of sync between the hypervisor and the Nova |
| # database will have to be synchronized manually. |
| # (boolean value) |
| #handle_virt_lifecycle_events=true |
| |
| |
| [wsgi] |
| # |
| # Options under this group are used to configure WSGI (Web Server Gateway |
| # Interface). WSGI is used to serve API requests. |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # This option represents a file name for the paste.deploy config for nova-api. |
| # |
| # Possible values: |
| # |
| # * A string representing file name for the paste.deploy config. |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/api_paste_config |
| api_paste_config=/etc/nova/api-paste.ini |
| |
| # |
| # It represents a python format string that is used as the template to generate |
| # log lines. The following values can be formatted into it: client_ip, |
| # date_time, request_line, status_code, body_length, wall_seconds. |
| # |
| # This option is used for building custom request loglines. |
| # |
| # Possible values: |
| # |
| # * '%(client_ip)s "%(request_line)s" status: %(status_code)s' |
| # 'len: %(body_length)s time: %(wall_seconds).7f' (default) |
| # * Any formatted string formed by specific values. |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/wsgi_log_format |
| #wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f |
| |
| # |
| # This option specifies the HTTP header used to determine the protocol scheme |
| # for the original request, even if it was removed by a SSL terminating proxy. |
| # |
| # Possible values: |
| # |
| # * None (default) - the request scheme is not influenced by any HTTP headers. |
| # * Valid HTTP header, like HTTP_X_FORWARDED_PROTO |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/secure_proxy_ssl_header |
| #secure_proxy_ssl_header=<None> |
| |
| # |
| # This option allows setting path to the CA certificate file that should be used |
| # to verify connecting clients. |
| # |
| # Possible values: |
| # |
| # * String representing path to the CA certificate file. |
| # |
| # Related options: |
| # |
| # * enabled_ssl_apis |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/ssl_ca_file |
| #ssl_ca_file=<None> |
| |
| # |
| # This option allows setting path to the SSL certificate of API server. |
| # |
| # Possible values: |
| # |
| # * String representing path to the SSL certificate. |
| # |
| # Related options: |
| # |
| # * enabled_ssl_apis |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/ssl_cert_file |
| #ssl_cert_file=<None> |
| |
| # |
| # This option specifies the path to the file where SSL private key of API |
| # server is stored when SSL is in effect. |
| # |
| # Possible values: |
| # |
| # * String representing path to the SSL private key. |
| # |
| # Related options: |
| # |
| # * enabled_ssl_apis |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/ssl_key_file |
| #ssl_key_file=<None> |
| |
| # |
| # This option sets the value of TCP_KEEPIDLE in seconds for each server socket. |
| # It specifies the duration of time to keep connection active. TCP generates a |
| # KEEPALIVE transmission for an application that requests to keep connection |
| # active. Not supported on OS X. |
| # |
| # Related options: |
| # |
| # * keep_alive |
| # (integer value) |
| # Minimum value: 0 |
| # Deprecated group/name - [DEFAULT]/tcp_keepidle |
| #tcp_keepidle=600 |
| |
| # |
| # This option specifies the size of the pool of greenthreads used by wsgi. |
| # It is possible to limit the number of concurrent connections using this |
| # option. |
| # (integer value) |
| # Minimum value: 0 |
| # Deprecated group/name - [DEFAULT]/wsgi_default_pool_size |
| #default_pool_size=1000 |
| |
| # |
| # This option specifies the maximum line size of message headers to be accepted. |
| # max_header_line may need to be increased when using large tokens (typically |
| # those generated by the Keystone v3 API with big service catalogs). |
| # |
| # Since TCP is a stream based protocol, in order to reuse a connection, the HTTP |
| # has to have a way to indicate the end of the previous response and beginning |
| # of the next. Hence, in a keep_alive case, all messages must have a |
| # self-defined message length. |
| # (integer value) |
| # Minimum value: 0 |
| # Deprecated group/name - [DEFAULT]/max_header_line |
| #max_header_line=16384 |
| |
| # |
| # This option allows using the same TCP connection to send and receive multiple |
| # HTTP requests/responses, as opposed to opening a new one for every single |
| # request/response pair. HTTP keep-alive indicates HTTP connection reuse. |
| # |
| # Possible values: |
| # |
| # * True : reuse HTTP connection. |
| # * False : closes the client socket connection explicitly. |
| # |
| # Related options: |
| # |
| # * tcp_keepidle |
| # (boolean value) |
| # Deprecated group/name - [DEFAULT]/wsgi_keep_alive |
| #keep_alive=true |
| |
| # |
| # This option specifies the timeout for client connections' socket operations. |
| # If an incoming connection is idle for this number of seconds it will be |
| # closed. It indicates timeout on individual read/writes on the socket |
| # connection. To wait forever set to 0. |
| # (integer value) |
| # Minimum value: 0 |
| # Deprecated group/name - [DEFAULT]/client_socket_timeout |
| #client_socket_timeout=900 |
| |
| |
| [xenserver] |
| # |
| # XenServer options are used when the compute_driver is set to use |
| # XenServer (compute_driver=xenapi.XenAPIDriver). |
| # |
| # Must specify connection_url, connection_password and ovs_integration_bridge to |
| # use compute_driver=xenapi.XenAPIDriver. |
| |
| # |
| # From nova.conf |
| # |
| |
| # |
| # Number of seconds to wait for agent's reply to a request. |
| # |
| # Nova configures/performs certain administrative actions on a server with the |
| # help of an agent that's installed on the server. The communication between |
| # Nova and the agent is achieved via sharing messages, called records, over |
| # xenstore, a shared storage across all the domains on a Xenserver host. |
# Operations performed by the agent on behalf of nova are: 'version',
# 'key_init', 'password', 'resetnetwork', 'inject_file', and 'agentupdate'.
| # |
| # To perform one of the above operations, the xapi 'agent' plugin writes the |
| # command and its associated parameters to a certain location known to the |
| # domain |
| # and awaits response. On being notified of the message, the agent performs |
| # appropriate actions on the server and writes the result back to xenstore. This |
| # result is then read by the xapi 'agent' plugin to determine the |
| # success/failure |
| # of the operation. |
| # |
| # This config option determines how long the xapi 'agent' plugin shall wait to |
| # read the response off of xenstore for a given request/command. If the agent on |
| # the instance fails to write the result in this time period, the operation is |
| # considered to have timed out. |
| # |
| # Related options: |
| # |
| # * ``agent_version_timeout`` |
| # * ``agent_resetnetwork_timeout`` |
| # |
| # (integer value) |
| # Minimum value: 0 |
| #agent_timeout=30 |
| |
| # |
# Number of seconds to wait for agent's reply to version request.
| # |
| # This indicates the amount of time xapi 'agent' plugin waits for the agent to |
| # respond to the 'version' request specifically. The generic timeout for agent |
| # communication ``agent_timeout`` is ignored in this case. |
| # |
| # During the build process the 'version' request is used to determine if the |
| # agent is available/operational to perform other requests such as |
| # 'resetnetwork', 'password', 'key_init' and 'inject_file'. If the 'version' |
| # call |
| # fails, the other configuration is skipped. So, this configuration option can |
| # also be interpreted as time in which agent is expected to be fully |
| # operational. |
| # (integer value) |
| # Minimum value: 0 |
| #agent_version_timeout=300 |
| |
| # |
| # Number of seconds to wait for agent's reply to resetnetwork |
| # request. |
| # |
| # This indicates the amount of time xapi 'agent' plugin waits for the agent to |
| # respond to the 'resetnetwork' request specifically. The generic timeout for |
| # agent communication ``agent_timeout`` is ignored in this case. |
| # (integer value) |
| # Minimum value: 0 |
| #agent_resetnetwork_timeout=60 |
| |
| # |
| # Path to locate guest agent on the server. |
| # |
| # Specifies the path in which the XenAPI guest agent should be located. If the |
| # agent is present, network configuration is not injected into the image. |
| # |
| # Related options: |
| # |
| # For this option to have an effect: |
| # * ``flat_injected`` should be set to ``True`` |
| # * ``compute_driver`` should be set to ``xenapi.XenAPIDriver`` |
| # |
| # (string value) |
| #agent_path=usr/sbin/xe-update-networking |
| |
| # |
| # Disables the use of XenAPI agent. |
| # |
| # This configuration option suggests whether the use of agent should be enabled |
| # or not regardless of what image properties are present. Image properties have |
| # an effect only when this is set to ``True``. Read description of config option |
| # ``use_agent_default`` for more information. |
| # |
| # Related options: |
| # |
| # * ``use_agent_default`` |
| # |
| # (boolean value) |
| #disable_agent=false |
| |
| # |
| # Whether or not to use the agent by default when its usage is enabled but not |
| # indicated by the image. |
| # |
| # The use of XenAPI agent can be disabled altogether using the configuration |
| # option ``disable_agent``. However, if it is not disabled, the use of an agent |
| # can still be controlled by the image in use through one of its properties, |
| # ``xenapi_use_agent``. If this property is either not present or specified |
| # incorrectly on the image, the use of agent is determined by this configuration |
| # option. |
| # |
| # Note that if this configuration is set to ``True`` when the agent is not |
| # present, the boot times will increase significantly. |
| # |
| # Related options: |
| # |
| # * ``disable_agent`` |
| # |
| # (boolean value) |
| #use_agent_default=false |
| |
| # Timeout in seconds for XenAPI login. (integer value) |
| # Minimum value: 0 |
| #login_timeout=10 |
| |
| # |
| # Maximum number of concurrent XenAPI connections. |
| # |
| # In nova, multiple XenAPI requests can happen at a time. |
| # Configuring this option will parallelize access to the XenAPI |
| # session, which allows you to make concurrent XenAPI connections. |
| # (integer value) |
| # Minimum value: 1 |
| #connection_concurrent=5 |
| |
| # DEPRECATED: |
| # Base URL for torrent files; must contain a slash character (see RFC 1808, |
| # step 6). |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # The torrent feature has not been tested nor maintained, and as such is being |
| # removed. |
| #torrent_base_url=<None> |
| |
| # DEPRECATED: Probability that peer will become a seeder (1.0 = 100%) (floating |
| # point value) |
| # Minimum value: 0 |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # The torrent feature has not been tested nor maintained, and as such is being |
| # removed. |
| #torrent_seed_chance=1.0 |
| |
| # DEPRECATED: |
# Number of seconds after downloading an image via BitTorrent that it should
# be seeded for other peers.
| # (integer value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # The torrent feature has not been tested nor maintained, and as such is being |
| # removed. |
| #torrent_seed_duration=3600 |
| |
| # DEPRECATED: |
| # Cached torrent files not accessed within this number of seconds can be reaped. |
| # (integer value) |
| # Minimum value: 0 |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # The torrent feature has not been tested nor maintained, and as such is being |
| # removed. |
| #torrent_max_last_accessed=86400 |
| |
| # DEPRECATED: Beginning of port range to listen on (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # The torrent feature has not been tested nor maintained, and as such is being |
| # removed. |
| #torrent_listen_port_start=6881 |
| |
| # DEPRECATED: End of port range to listen on (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # The torrent feature has not been tested nor maintained, and as such is being |
| # removed. |
| #torrent_listen_port_end=6891 |
| |
| # DEPRECATED: |
| # Number of seconds a download can remain at the same progress percentage w/o |
| # being considered a stall. |
| # (integer value) |
| # Minimum value: 0 |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # The torrent feature has not been tested nor maintained, and as such is being |
| # removed. |
| #torrent_download_stall_cutoff=600 |
| |
| # DEPRECATED: |
| # Maximum number of seeder processes to run concurrently within a given dom0 |
| # (-1 = no limit). |
| # (integer value) |
| # Minimum value: -1 |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # The torrent feature has not been tested nor maintained, and as such is being |
| # removed. |
| #torrent_max_seeder_processes_per_host=1 |
| |
| # |
| # Cache glance images locally. |
| # |
| # The value for this option must be chosen from the choices listed |
| # here. Configuring a value other than these will default to 'all'. |
| # |
| # Note: There is nothing that deletes these images. |
| # |
| # Possible values: |
| # |
| # * `all`: will cache all images. |
| # * `some`: will only cache images that have the |
| # image_property `cache_in_nova=True`. |
| # * `none`: turns off caching entirely. |
| # (string value) |
| # Allowed values: all, some, none |
| #cache_images=all |
| |
| # |
| # Compression level for images. |
| # |
| # By setting this option we can configure the gzip compression level. |
| # This option sets GZIP environment variable before spawning tar -cz |
| # to force the compression level. It defaults to none, which means the |
| # GZIP environment variable is not set and the default (usually -6) |
| # is used. |
| # |
| # Possible values: |
| # |
| # * Range is 1-9, e.g., 9 for gzip -9, 9 being most |
| # compressed but most CPU intensive on dom0. |
| # * Any values out of this range will default to None. |
| # (integer value) |
| # Minimum value: 1 |
| # Maximum value: 9 |
| #image_compression_level=<None> |
| |
| # Default OS type used when uploading an image to glance (string value) |
| #default_os_type=linux |
| |
| # Time in secs to wait for a block device to be created (integer value) |
| # Minimum value: 1 |
| #block_device_creation_timeout=10 |
| {%- if controller.block_device_creation_timeout is defined %} |
| block_device_creation_timeout = {{ controller.block_device_creation_timeout }} |
| {%- endif %} |
| |
| # |
| # Maximum size in bytes of kernel or ramdisk images. |
| # |
| # Specifying the maximum size of kernel or ramdisk will avoid copying |
| # large files to dom0 and fill up /boot/guest. |
| # (integer value) |
| #max_kernel_ramdisk_size=16777216 |
| |
| # |
| # Filter for finding the SR to be used to install guest instances on. |
| # |
| # Possible values: |
| # |
| # * To use the Local Storage in default XenServer/XCP installations |
| # set this flag to other-config:i18n-key=local-storage. |
| # * To select an SR with a different matching criteria, you could |
| # set it to other-config:my_favorite_sr=true. |
| # * To fall back on the Default SR, as displayed by XenCenter, |
| # set this flag to: default-sr:true. |
| # (string value) |
| #sr_matching_filter=default-sr:true |
| |
| # |
| # Whether to use sparse_copy for copying data on a resize down. |
| # (False will use standard dd). This speeds up resizes down |
| # considerably since large runs of zeros won't have to be rsynced. |
| # (boolean value) |
| #sparse_copy=true |
| |
| # |
| # Maximum number of retries to unplug VBD. |
| # If set to 0, should try once, no retries. |
| # (integer value) |
| # Minimum value: 0 |
| #num_vbd_unplug_retries=10 |
| |
| # |
| # Whether or not to download images via Bit Torrent. |
| # |
| # The value for this option must be chosen from the choices listed |
| # here. Configuring a value other than these will default to 'none'. |
| # |
| # Possible values: |
| # |
| # * `all`: will download all images. |
| # * `some`: will only download images that have the image_property |
| # `bittorrent=true`. |
| # * `none`: will turn off downloading images via Bit Torrent. |
| # (string value) |
| # Allowed values: all, some, none |
| #torrent_images=none |
| |
| # |
| # Name of network to use for booting iPXE ISOs. |
| # |
| # An iPXE ISO is a specially crafted ISO which supports iPXE booting. |
| # This feature gives a means to roll your own image. |
| # |
| # By default this option is not set. Enable this option to |
| # boot an iPXE ISO. |
| # |
| # Related Options: |
| # |
| # * `ipxe_boot_menu_url` |
| # * `ipxe_mkisofs_cmd` |
| # (string value) |
| #ipxe_network_name=<None> |
| |
| # |
| # URL to the iPXE boot menu. |
| # |
| # An iPXE ISO is a specially crafted ISO which supports iPXE booting. |
| # This feature gives a means to roll your own image. |
| # |
| # By default this option is not set. Enable this option to |
| # boot an iPXE ISO. |
| # |
| # Related Options: |
| # |
| # * `ipxe_network_name` |
| # * `ipxe_mkisofs_cmd` |
| # (string value) |
| #ipxe_boot_menu_url=<None> |
| |
| # |
| # Name and optionally path of the tool used for ISO image creation. |
| # |
| # An iPXE ISO is a specially crafted ISO which supports iPXE booting. |
| # This feature gives a means to roll your own image. |
| # |
| # Note: By default `mkisofs` is not present in the Dom0, so the |
| # package can either be manually added to Dom0 or include the |
| # `mkisofs` binary in the image itself. |
| # |
| # Related Options: |
| # |
| # * `ipxe_network_name` |
| # * `ipxe_boot_menu_url` |
| # (string value) |
| #ipxe_mkisofs_cmd=mkisofs |
| |
| # |
| # URL for connection to XenServer/Xen Cloud Platform. A special value |
| # of unix://local can be used to connect to the local unix socket. |
| # |
| # Possible values: |
| # |
| # * Any string that represents a URL. The connection_url is |
| # generally the management network IP address of the XenServer. |
| # * This option must be set if you chose the XenServer driver. |
| # (string value) |
| #connection_url=<None> |
| |
| # Username for connection to XenServer/Xen Cloud Platform (string value) |
| #connection_username=root |
| |
| # Password for connection to XenServer/Xen Cloud Platform (string value) |
| #connection_password=<None> |
| |
| # |
| # The interval used for polling of coalescing vhds. |
| # |
| # This is the interval after which the task of coalesce VHD is |
| # performed, until it reaches the max attempts that is set by |
| # vhd_coalesce_max_attempts. |
| # |
| # Related options: |
| # |
| # * `vhd_coalesce_max_attempts` |
| # (floating point value) |
| # Minimum value: 0 |
| #vhd_coalesce_poll_interval=5.0 |
| |
| # |
| # Ensure compute service is running on host XenAPI connects to. |
| # This option must be set to false if the 'independent_compute' |
| # option is set to true. |
| # |
| # Possible values: |
| # |
| # * Setting this option to true will make sure that compute service |
| # is running on the same host that is specified by connection_url. |
| # * Setting this option to false, doesn't perform the check. |
| # |
| # Related options: |
| # |
| # * `independent_compute` |
| # (boolean value) |
| #check_host=true |
| |
| # |
| # Max number of times to poll for VHD to coalesce. |
| # |
| # This option determines the maximum number of attempts that can be |
| # made for coalescing the VHD before giving up. |
| # |
| # Related options: |
| # |
| # * `vhd_coalesce_poll_interval` |
| # (integer value) |
| # Minimum value: 0 |
| #vhd_coalesce_max_attempts=20 |
| |
| # Base path to the storage repository on the XenServer host. (string value) |
| #sr_base_path=/var/run/sr-mount |
| |
| # |
| # The iSCSI Target Host. |
| # |
| # This option represents the hostname or ip of the iSCSI Target. |
| # If the target host is not present in the connection information from |
| # the volume provider then the value from this option is taken. |
| # |
| # Possible values: |
| # |
| # * Any string that represents hostname/ip of Target. |
| # (string value) |
| #target_host=<None> |
| |
| # |
| # The iSCSI Target Port. |
| # |
| # This option represents the port of the iSCSI Target. If the |
| # target port is not present in the connection information from the |
| # volume provider then the value from this option is taken. |
| # (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| #target_port=3260 |
| |
| # DEPRECATED: |
| # Used to enable the remapping of VBD dev. |
| # (Works around an issue in Ubuntu Maverick) |
| # (boolean value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # This option provided a workaround for issues in Ubuntu Maverick, which |
| # was released in April 2010 and was dropped from support in April 2012. |
| # There's no reason to continue supporting this option. |
| #remap_vbd_dev=false |
| |
| # |
| # Specify prefix to remap VBD dev to (ex. /dev/xvdb -> /dev/sdb). |
| # |
| # Related options: |
| # |
| # * If `remap_vbd_dev` is set to False this option has no impact. |
| # (string value) |
| #remap_vbd_dev_prefix=sd |
| |
| # |
| # Used to prevent attempts to attach VBDs locally, so Nova can |
| # be run in a VM on a different host. |
| # |
| # Related options: |
| # |
| # * ``CONF.flat_injected`` (Must be False) |
| # * ``CONF.xenserver.check_host`` (Must be False) |
| # * ``CONF.default_ephemeral_format`` (Must be unset or 'ext3') |
| # * Joining host aggregates (will error if attempted) |
| # * Swap disks for Windows VMs (will error if attempted) |
| # * Nova-based auto_configure_disk (will error if attempted) |
| # (boolean value) |
| #independent_compute=false |
| |
| # |
| # Wait time for instances to go to running state. |
| # |
| # Provide an integer value representing time in seconds to set the |
| # wait time for an instance to go to running state. |
| # |
| # When a request to create an instance is received by nova-api and |
| # communicated to nova-compute, the creation of the instance occurs |
| # through interaction with Xen via XenAPI in the compute node. Once |
| # the node on which the instance(s) are to be launched is decided by |
| # nova-schedule and the launch is triggered, a certain amount of wait |
| # time is involved until the instance(s) can become available and |
| # 'running'. This wait time is defined by running_timeout. If the |
| # instances do not go to running state within this specified wait |
| # time, the launch expires and the instance(s) are set to 'error' |
| # state. |
| # (integer value) |
| # Minimum value: 0 |
| #running_timeout=60 |
| |
| # DEPRECATED: |
| # The XenAPI VIF driver using XenServer Network APIs. |
| # |
| # Provide a string value representing the VIF XenAPI vif driver to use for |
| # plugging virtual network interfaces. |
| # |
| # Xen configuration uses bridging within the backend domain to allow |
| # all VMs to appear on the network as individual hosts. Bridge |
| # interfaces are used to create a XenServer VLAN network in which |
| # the VIFs for the VM instances are plugged. If no VIF bridge driver |
| # is plugged, the bridge is not made available. This configuration |
| # option takes in a value for the VIF driver. |
| # |
| # Possible values: |
| # |
| # * nova.virt.xenapi.vif.XenAPIOpenVswitchDriver (default) |
| # * nova.virt.xenapi.vif.XenAPIBridgeDriver (deprecated) |
| # |
| # Related options: |
| # |
| # * ``vlan_interface`` |
| # * ``ovs_integration_bridge`` |
| # (string value) |
| # This option is deprecated for removal since 15.0.0. |
| # Its value may be silently ignored in the future. |
| # Reason: |
| # There are only two in-tree vif drivers for XenServer. XenAPIBridgeDriver is |
| # for |
| # nova-network which is deprecated and XenAPIOpenVswitchDriver is for Neutron |
| # which is the default configuration for Nova since the 15.0.0 Ocata release. In |
| # the future the "use_neutron" configuration option will be used to determine |
| # which vif driver to use. |
| #vif_driver=nova.virt.xenapi.vif.XenAPIOpenVswitchDriver |
| |
| # |
| # Dom0 plugin driver used to handle image uploads. |
| # |
| # Provide a string value representing a plugin driver required to |
| # handle the image uploading to GlanceStore. |
| # |
| # Images, and snapshots from XenServer need to be uploaded to the data |
| # store for use. image_upload_handler takes in a value for the Dom0 |
| # plugin driver. This driver is then called to upload images to the |
| # GlanceStore. |
| # (string value) |
| #image_upload_handler=nova.virt.xenapi.image.glance.GlanceStore |
| |
| # |
| # Number of seconds to wait for SR to settle if the VDI |
| # does not exist when first introduced. |
| # |
| # Some SRs, particularly iSCSI connections are slow to see the VDIs |
| # right after they got introduced. Setting this option to a |
| # time interval will make the SR to wait for that time period |
| # before raising VDI not found exception. |
| # (integer value) |
| # Minimum value: 0 |
| #introduce_vdi_retry_wait=20 |
| |
| # |
| # The name of the integration Bridge that is used with xenapi |
| # when connecting with Open vSwitch. |
| # |
| # Note: The value of this config option is dependent on the |
| # environment, therefore this configuration value must be set |
| # accordingly if you are using XenAPI. |
| # |
| # Possible values: |
| # |
| # * Any string that represents a bridge name. |
| # (string value) |
| #ovs_integration_bridge=<None> |
| |
| # |
| # When adding new host to a pool, this will append a --force flag to the |
| # command, forcing hosts to join a pool, even if they have different CPUs. |
| # |
| # Since XenServer version 5.6 it is possible to create a pool of hosts that have |
| # different CPU capabilities. To accommodate CPU differences, XenServer limited |
| # features it uses to determine CPU compatibility to only the ones that are |
| # exposed by CPU and support for CPU masking was added. |
| # Despite this effort to level differences between CPUs, it is still possible |
| # that adding new host will fail, thus option to force join was introduced. |
| # (boolean value) |
| #use_join_force=true |
| |
| # |
| # Publicly visible name for this console host. |
| # |
| # Possible values: |
| # |
| # * A string representing a valid hostname |
| # (string value) |
| # Deprecated group/name - [DEFAULT]/console_public_hostname |
| #console_public_hostname=lcy01-22 |
| |
| |
| [xvp] |
| # |
| # Configuration options for XVP. |
| # |
| # xvp (Xen VNC Proxy) is a proxy server providing password-protected VNC-based |
| # access to the consoles of virtual machines hosted on Citrix XenServer. |
| |
| # |
| # From nova.conf |
| # |
| |
| # XVP conf template (string value) |
| # Deprecated group/name - [DEFAULT]/console_xvp_conf_template |
| #console_xvp_conf_template=$pybasedir/nova/console/xvp.conf.template |
| |
| # Generated XVP conf file (string value) |
| # Deprecated group/name - [DEFAULT]/console_xvp_conf |
| #console_xvp_conf=/etc/xvp.conf |
| |
| # XVP master process pid file (string value) |
| # Deprecated group/name - [DEFAULT]/console_xvp_pid |
| #console_xvp_pid=/var/run/xvp.pid |
| |
| # XVP log file (string value) |
| # Deprecated group/name - [DEFAULT]/console_xvp_log |
| #console_xvp_log=/var/log/xvp.log |
| |
| # Port for XVP to multiplex VNC connections on (port value) |
| # Minimum value: 0 |
| # Maximum value: 65535 |
| # Deprecated group/name - [DEFAULT]/console_xvp_multiplex_port |
| #console_xvp_multiplex_port=5900 |