Introduce Newton release for Nova
Add configuration file templates and parameters for the Newton release
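
A minimal compute pillar sketch for reference (illustrative only: key names
follow what the new templates read via nova/map.jinja, all values are
placeholders):

    nova:
      compute:
        version: newton
        heal_instance_info_cache_interval: 60
        vncproxy_url: http://10.0.0.10:6080
        bind:
          vnc_address: 10.0.0.11
          vnc_port: 6080
        identity:
          host: 10.0.0.10
          port: 35357
          user: nova
          password: secret
          tenant: service
          region: RegionOne
        message_queue:
          user: openstack
          password: secret
          host: 10.0.0.10
          virtual_host: '/openstack'
        network:
          host: 10.0.0.10
          port: 9696
          user: neutron
          password: secret
          region: RegionOne
        image:
          host: 10.0.0.10
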
Change-Id: I6c91a69b9bd6c865d5f6da8019b3a8b7eaf0c6db
diff --git a/nova/files/newton/api-paste.ini.Debian b/nova/files/newton/api-paste.ini.Debian
new file mode 100644
index 0000000..edee888
--- /dev/null
+++ b/nova/files/newton/api-paste.ini.Debian
@@ -0,0 +1,87 @@
+############
+# Metadata #
+############
+{%- from "nova/map.jinja" import controller with context %}
+[composite:metadata]
+use = egg:Paste#urlmap
+/: meta
+
+[pipeline:meta]
+pipeline = cors metaapp
+
+[app:metaapp]
+paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
+
+#############
+# OpenStack #
+#############
+
+[composite:osapi_compute]
+use = call:nova.api.openstack.urlmap:urlmap_factory
+/: oscomputeversions
+# v21 is an exact feature match for v2, except it has more stringent
+# input validation on the wsgi surface (prevents fuzzing early on the
+# API). It also provides new features via API microversions which are
+# opt-in for clients. Unaware clients will receive the same frozen
+# v2 API feature set, but with some relaxed validation.
+/v2: openstack_compute_api_v21_legacy_v2_compatible
+/v2.1: openstack_compute_api_v21
+
+[composite:openstack_compute_api_v21]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth2 = cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21
+keystone = cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken keystonecontext {% if controller.audit.enabled %}audit {% endif %}osapi_compute_app_v21
+
+[composite:openstack_compute_api_v21_legacy_v2_compatible]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth2 = cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2 legacy_v2_compatible osapi_compute_app_v21
+keystone = cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken keystonecontext {% if controller.audit.enabled %}audit {% endif %}legacy_v2_compatible osapi_compute_app_v21
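+
+# For reference, when controller.audit.enabled is set, the keystone pipeline
+# directly above renders as (illustrative):
+#   keystone = cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken keystonecontext audit legacy_v2_compatible osapi_compute_app_v21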
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware:RequestId.factory
+
+[filter:compute_req_id]
+paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
+
+[filter:noauth2]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = oslo_middleware:RequestBodySizeLimiter.factory
+
+[filter:http_proxy_to_wsgi]
+paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
+
+[filter:legacy_v2_compatible]
+paste.filter_factory = nova.api.openstack:LegacyV2CompatibleWrapper.factory
+
+[app:osapi_compute_app_v21]
+paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory
+
+[pipeline:oscomputeversions]
+pipeline = faultwrap http_proxy_to_wsgi oscomputeversionapp
+
+[app:oscomputeversionapp]
+paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
+
+{%- if controller.audit.enabled %}
+[filter:audit]
+paste.filter_factory = {{ controller.get("audit", {}).get("filter_factory", "keystonemiddleware.audit:filter_factory") }}
+audit_map_file = {{ controller.get("audit", {}).get("map_file", "/etc/pycadf/nova_api_audit_map.conf") }}
+{%- endif %}
+##########
+# Shared #
+##########
+
+[filter:cors]
+paste.filter_factory = oslo_middleware.cors:filter_factory
+oslo_config_project = nova
+
+[filter:keystonecontext]
+paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
diff --git a/nova/files/newton/api-paste.ini.RedHat b/nova/files/newton/api-paste.ini.RedHat
new file mode 120000
index 0000000..08fd76a
--- /dev/null
+++ b/nova/files/newton/api-paste.ini.RedHat
@@ -0,0 +1 @@
+api-paste.ini.Debian
\ No newline at end of file
diff --git a/nova/files/newton/libvirt.Debian b/nova/files/newton/libvirt.Debian
new file mode 100644
index 0000000..b80fab0
--- /dev/null
+++ b/nova/files/newton/libvirt.Debian
@@ -0,0 +1,19 @@
+# Defaults for libvirt-bin initscript (/etc/init.d/libvirt-bin)
+# This is a POSIX shell fragment
+
+# Start libvirtd to handle qemu/kvm:
+start_libvirtd="yes"
+
+# options passed to libvirtd, add "-l" to listen on tcp
+{%- if grains.get('init', None) != 'systemd' %}
+libvirtd_opts="-d -l"
+LIBVIRTD_ARGS="--listen"
+
+{%- else %}
+# Don't use "-d" option with systemd
+libvirtd_opts="-l"
+LIBVIRTD_ARGS="--listen"
+{%- endif %}
+# pass in location of kerberos keytab
+#export KRB5_KTNAME=/etc/libvirt/libvirt.keytab
+
diff --git a/nova/files/newton/libvirt.RedHat b/nova/files/newton/libvirt.RedHat
new file mode 100644
index 0000000..b80fab0
--- /dev/null
+++ b/nova/files/newton/libvirt.RedHat
@@ -0,0 +1,19 @@
+# Defaults for libvirt-bin initscript (/etc/init.d/libvirt-bin)
+# This is a POSIX shell fragment
+
+# Start libvirtd to handle qemu/kvm:
+start_libvirtd="yes"
+
+# options passed to libvirtd, add "-l" to listen on tcp
+{%- if grains.get('init', None) != 'systemd' %}
+libvirtd_opts="-d -l"
+LIBVIRTD_ARGS="--listen"
+
+{%- else %}
+# Don't use "-d" option with systemd
+libvirtd_opts="-l"
+LIBVIRTD_ARGS="--listen"
+{%- endif %}
+# pass in location of kerberos keytab
+#export KRB5_KTNAME=/etc/libvirt/libvirt.keytab
+
diff --git a/nova/files/newton/libvirtd.conf.Debian b/nova/files/newton/libvirtd.conf.Debian
new file mode 100644
index 0000000..8333dcb
--- /dev/null
+++ b/nova/files/newton/libvirtd.conf.Debian
@@ -0,0 +1,402 @@
+# Master libvirt daemon configuration file
+#
+# For further information consult http://libvirt.org/format.html
+#
+# NOTE: the tests/daemon-conf regression test script requires
+# that each "PARAMETER = VALUE" line in this file have the parameter
+# name just after a leading "#".
+
+#################################################################
+#
+# Network connectivity controls
+#
+
+# Flag listening for secure TLS connections on the public TCP/IP port.
+# NB, must pass the --listen flag to the libvirtd process for this to
+# have any effect.
+#
+# It is necessary to setup a CA and issue server certificates before
+# using this capability.
+#
+# This is enabled by default, uncomment this to disable it
+#listen_tls = 0
+
+
+listen_tls = 0
+listen_tcp = 1
+auth_tcp = "none"
+
+
+# Listen for unencrypted TCP connections on the public TCP/IP port.
+# NB, must pass the --listen flag to the libvirtd process for this to
+# have any effect.
+#
+# Using the TCP socket requires SASL authentication by default. Only
+# SASL mechanisms which support data encryption are allowed. This is
+# DIGEST_MD5 and GSSAPI (Kerberos5)
+#
+# This is disabled by default, uncomment this to enable it.
+#listen_tcp = 1
+
+
+
+# Override the port for accepting secure TLS connections
+# This can be a port number, or service name
+#
+#tls_port = "16514"
+
+# Override the port for accepting insecure TCP connections
+# This can be a port number, or service name
+#
+#tcp_port = "16509"
+
+
+# Override the default configuration which binds to all network
+# interfaces. This can be a numeric IPv4/6 address, or hostname
+#
+#listen_addr = "192.168.0.1"
+
+
+# Flag toggling mDNS advertisement of the libvirt service.
+#
+# Alternatively can disable for all services on a host by
+# stopping the Avahi daemon
+#
+# This is disabled by default, uncomment this to enable it
+#mdns_adv = 1
+
+# Override the default mDNS advertisement name. This must be
+# unique on the immediate broadcast network.
+#
+# The default is "Virtualization Host HOSTNAME", where HOSTNAME
+# is substituted for the short hostname of the machine (without domain)
+#
+#mdns_name = "Virtualization Host Joe Demo"
+
+
+#################################################################
+#
+# UNIX socket access controls
+#
+
+# Set the UNIX domain socket group ownership. This can be used to
+# allow a 'trusted' set of users access to management capabilities
+# without becoming root.
+#
+# This is restricted to 'root' by default.
+unix_sock_group = "libvirtd"
+
+# Set the UNIX socket permissions for the R/O socket. This is used
+# for monitoring VM status only
+#
+# Default allows any user. If setting group ownership, you may want to
+# restrict this to:
+#unix_sock_ro_perms = "0777"
+
+# Set the UNIX socket permissions for the R/W socket. This is used
+# for full management of VMs
+#
+# Default allows only root. If PolicyKit is enabled on the socket,
+# the default will change to allow everyone (eg, 0777)
+#
+# If not using PolicyKit and setting group ownership for access
+# control then you may want to relax this to:
+unix_sock_rw_perms = "0770"
+
+# Set the name of the directory in which sockets will be found/created.
+#unix_sock_dir = "/var/run/libvirt"
+
+#################################################################
+#
+# Authentication.
+#
+# - none: do not perform auth checks. If you can connect to the
+# socket you are allowed. This is suitable if there are
+# restrictions on connecting to the socket (eg, UNIX
+# socket permissions), or if there is a lower layer in
+# the network providing auth (eg, TLS/x509 certificates)
+#
+# - sasl: use SASL infrastructure. The actual auth scheme is then
+# controlled from /etc/sasl2/libvirt.conf. For the TCP
+# socket only GSSAPI & DIGEST-MD5 mechanisms will be used.
+# For non-TCP or TLS sockets, any scheme is allowed.
+#
+# - polkit: use PolicyKit to authenticate. This is only suitable
+# for use on the UNIX sockets. The default policy will
+# require a user to supply their own password to gain
+# full read/write access (aka sudo like), while anyone
+# is allowed read/only access.
+#
+# Set an authentication scheme for UNIX read-only sockets
+# By default socket permissions allow anyone to connect
+#
+# To restrict monitoring of domains you may wish to enable
+# an authentication mechanism here
+auth_unix_ro = "none"
+
+# Set an authentication scheme for UNIX read-write sockets
+# By default socket permissions only allow root. If PolicyKit
+# support was compiled into libvirt, the default will be to
+# use 'polkit' auth.
+#
+# If the unix_sock_rw_perms are changed you may wish to enable
+# an authentication mechanism here
+auth_unix_rw = "none"
+
+# Change the authentication scheme for TCP sockets.
+#
+# If you don't enable SASL, then all TCP traffic is cleartext.
+# Don't do this outside of a dev/test scenario. For real world
+# use, always enable SASL and use the GSSAPI or DIGEST-MD5
+# mechanism in /etc/sasl2/libvirt.conf
+#auth_tcp = "sasl"
+#auth_tcp = "none"
+
+# Change the authentication scheme for TLS sockets.
+#
+# TLS sockets already have encryption provided by the TLS
+# layer, and limited authentication is done by certificates
+#
+# It is possible to make use of any SASL authentication
+# mechanism as well, by using 'sasl' for this option
+#auth_tls = "none"
+
+
+
+#################################################################
+#
+# TLS x509 certificate configuration
+#
+
+
+# Override the default server key file path
+#
+#key_file = "/etc/pki/libvirt/private/serverkey.pem"
+
+# Override the default server certificate file path
+#
+#cert_file = "/etc/pki/libvirt/servercert.pem"
+
+# Override the default CA certificate path
+#
+#ca_file = "/etc/pki/CA/cacert.pem"
+
+# Specify a certificate revocation list.
+#
+# Defaults to not using a CRL, uncomment to enable it
+#crl_file = "/etc/pki/CA/crl.pem"
+
+
+
+#################################################################
+#
+# Authorization controls
+#
+
+
+# Flag to disable verification of our own server certificates
+#
+# When libvirtd starts it performs some sanity checks against
+# its own certificates.
+#
+# Default is to always run sanity checks. Uncommenting this
+# will disable sanity checks which is not a good idea
+#tls_no_sanity_certificate = 1
+
+# Flag to disable verification of client certificates
+#
+# Client certificate verification is the primary authentication mechanism.
+# Any client which does not present a certificate signed by the CA
+# will be rejected.
+#
+# Default is to always verify. Uncommenting this will disable
+# verification - make sure an IP whitelist is set
+#tls_no_verify_certificate = 1
+
+
+# A whitelist of allowed x509 Distinguished Names
+# This list may contain wildcards such as
+#
+# "C=GB,ST=London,L=London,O=Red Hat,CN=*"
+#
+# See the POSIX fnmatch function for the format of the wildcards.
+#
+# NB If this is an empty list, no client can connect, so comment out
+# entirely rather than using empty list to disable these checks
+#
+# By default, no DNs are checked
+#tls_allowed_dn_list = ["DN1", "DN2"]
+
+
+# A whitelist of allowed SASL usernames. The format for usernames
+# depends on the SASL authentication mechanism. Kerberos usernames
+# look like username@REALM
+#
+# This list may contain wildcards such as
+#
+# "*@EXAMPLE.COM"
+#
+# See the POSIX fnmatch function for the format of the wildcards.
+#
+# NB If this is an empty list, no client can connect, so comment out
+# entirely rather than using empty list to disable these checks
+#
+# By default, no usernames are checked
+#sasl_allowed_username_list = ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM" ]
+
+
+
+#################################################################
+#
+# Processing controls
+#
+
+# The maximum number of concurrent client connections to allow
+# over all sockets combined.
+#max_clients = 20
+
+
+# The minimum limit sets the number of workers to start up
+# initially. If the number of active clients exceeds this,
+# then more threads are spawned, up to the max_workers limit.
+# Typically you'd want max_workers to equal the maximum number
+# of clients allowed.
+#min_workers = 5
+#max_workers = 20
+
+
+# The number of priority workers. If all workers from the
+# above pool are stuck, calls marked as high priority
+# (notably domainDestroy) can still be executed in this pool.
+#prio_workers = 5
+
+# Total global limit on concurrent RPC calls. Should be
+# at least as large as max_workers. Beyond this, RPC requests
+# will be read into memory and queued. This directly impacts
+# memory usage; currently each request requires 256 KB of
+# memory, so by default up to 5 MB of memory is used.
+#
+# XXX this isn't actually enforced yet, only the per-client
+# limit is used so far
+#max_requests = 20
+
+# Limit on concurrent requests from a single client
+# connection. To avoid one client monopolizing the server
+# this should be a small fraction of the global max_requests
+# and max_workers parameters
+#max_client_requests = 5
+
+#################################################################
+#
+# Logging controls
+#
+
+# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
+# basically 1 will log everything possible
+#log_level = 3
+
+# Logging filters:
+# A filter allows selecting a different logging level for a given category
+# of logs.
+# The format for a filter is one of:
+# x:name
+# x:+name
+# where name is a string which is matched against source file name,
+# e.g., "remote", "qemu", or "util/json", the optional "+" prefix
+# tells libvirt to log stack trace for each message matching name,
+# and x is the minimal level where matching messages should be logged:
+# 1: DEBUG
+# 2: INFO
+# 3: WARNING
+# 4: ERROR
+#
+# Multiple filters can be defined in a single log_filters; they just need to be
+# separated by spaces.
+#
+# e.g. to only get warning or errors from the remote layer and only errors
+# from the event layer:
+#log_filters="3:remote 4:event"
+
+# Logging outputs:
+# An output is one of the places to save logging information
+# The format for an output can be:
+# x:stderr
+# output goes to stderr
+# x:syslog:name
+# use syslog for the output and use the given name as the ident
+# x:file:file_path
+# output to a file, with the given filepath
+# In all cases the x prefix is the minimal level, acting as a filter
+# 1: DEBUG
+# 2: INFO
+# 3: WARNING
+# 4: ERROR
+#
+# Multiple outputs can be defined; they just need to be separated by spaces.
+# e.g. to log all warnings and errors to syslog under the libvirtd ident:
+#log_outputs="3:syslog:libvirtd"
+#
+
+# Log debug buffer size: default 64
+# The daemon keeps an internal debug log buffer which will be dumped in case
+# of crash or upon receiving a SIGUSR2 signal. This setting allows overriding
+# the default buffer size in kilobytes.
+# If the value is 0 or less, the debug log buffer is deactivated.
+#log_buffer_size = 64
+
+
+##################################################################
+#
+# Auditing
+#
+# This setting allows usage of the auditing subsystem to be altered:
+#
+# audit_level == 0 -> disable all auditing
+# audit_level == 1 -> enable auditing, only if enabled on host (default)
+# audit_level == 2 -> enable auditing, and exit if disabled on host
+#
+#audit_level = 2
+#
+# If set to 1, then audit messages will also be sent
+# via libvirt logging infrastructure. Defaults to 0
+#
+#audit_logging = 1
+
+###################################################################
+# UUID of the host:
+# Provide the UUID of the host here in case the command
+# 'dmidecode -s system-uuid' does not provide a valid uuid. In case
+# 'dmidecode' does not provide a valid UUID and none is provided here, a
+# temporary UUID will be generated.
+# Keep the format of the example UUID below. UUID must not have all digits
+# be the same.
+
+# NB This default all-zeros UUID will not work. Replace
+# it with the output of the 'uuidgen' command and then
+# uncomment this entry
+#host_uuid = "00000000-0000-0000-0000-000000000000"
+
+###################################################################
+# Keepalive protocol:
+# This allows libvirtd to detect broken client connections or even
+# dead clients. A keepalive message is sent to a client after
+# keepalive_interval seconds of inactivity to check if the client is
+# still responding; keepalive_count is the maximum number of keepalive
+# messages that are allowed to be sent to the client without getting
+# any response before the connection is considered broken. In other
+# words, the connection is automatically closed approximately after
+# keepalive_interval * (keepalive_count + 1) seconds since the last
+# message received from the client. If keepalive_interval is set to
+# -1, libvirtd will never send keepalive requests; however clients
+# can still send them and the daemon will send responses. When
+# keepalive_count is set to 0, connections will be automatically
+# closed after keepalive_interval seconds of inactivity without
+# sending any keepalive messages.
+#
+#keepalive_interval = 5
+#keepalive_count = 5
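+#
+# For example, with the default values shown above (interval 5, count 5), an
+# unresponsive client connection is closed after roughly
+# 5 * (5 + 1) = 30 seconds.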
+#
+# If set to 1, libvirtd will refuse to talk to clients that do not
+# support keepalive protocol. Defaults to 0.
+#
+#keepalive_required = 1
\ No newline at end of file
diff --git a/nova/files/newton/libvirtd.conf.RedHat b/nova/files/newton/libvirtd.conf.RedHat
new file mode 120000
index 0000000..2a7b101
--- /dev/null
+++ b/nova/files/newton/libvirtd.conf.RedHat
@@ -0,0 +1 @@
+libvirtd.conf.Debian
\ No newline at end of file
diff --git a/nova/files/newton/nova-compute.conf.Debian b/nova/files/newton/nova-compute.conf.Debian
new file mode 100644
index 0000000..0a34a70
--- /dev/null
+++ b/nova/files/newton/nova-compute.conf.Debian
@@ -0,0 +1,186 @@
+{%- from "nova/map.jinja" import compute with context %}
+
+[DEFAULT]
+logdir=/var/log/nova
+state_path=/var/lib/nova
+connection_type=libvirt
+rootwrap_config=/etc/nova/rootwrap.conf
+verbose=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+compute_manager=nova.compute.manager.ComputeManager
+network_device_mtu=65000
+use_neutron = True
+config_drive_format=vfat
+force_config_drive=True
+force_raw_images=True
+notify_api_faults=False
+allow_resize_to_same_host=True
+security_group_api=neutron
+vif_plugging_is_fatal=True
+vif_plugging_timeout=300
+dhcp_domain={{ compute.get('dhcp_domain', 'novalocal') }}
+{%- if compute.get('sriov', false) %}
+{%- for nic_name, sriov in compute.sriov.iteritems() %}
+pci_passthrough_whitelist = {"devname":"{{ sriov.devname }}","physical_network":"{{ sriov.physical_network }}"}
+{%- endfor %}
+{%- endif %}
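+# Illustrative rendering, assuming a pillar entry such as
+# sriov: {nic_one: {devname: eth3, physical_network: physnet2}} (placeholders):
+# pci_passthrough_whitelist = {"devname":"eth3","physical_network":"physnet2"}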
+{%- if compute.image.use_cow is defined %}
+use_cow_images = {{ compute.image.use_cow }}
+{%- endif %}
+
+remove_unused_original_minimum_age_seconds=86400
+image_service=nova.image.glance.GlanceImageService
+
+reserved_host_memory_mb = {{ compute.get('reserved_host_memory_mb', '512') }}
+
+{%- if compute.vcpu_pin_set is defined %}
+vcpu_pin_set={{ compute.vcpu_pin_set }}
+{%- endif %}
+
+allow_resize_to_same_host=True
+
+auth_strategy = keystone
+
+neutron_url_timeout = 300
+compute_driver = nova.virt.libvirt.LibvirtDriver
+
+heal_instance_info_cache_interval = {{ compute.heal_instance_info_cache_interval }}
+
+image_cache_manager_interval = 0
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+
+rpc_cast_timeout = 30
+rpc_response_timeout = 3600
+executor_thread_pool_size = 70
+report_interval = {{ compute.get('report_interval', '60') }}
+
+block_device_allocate_retries=600
+block_device_allocate_retries_interval=10
+
+resume_guests_state_on_host_boot = True
+service_down_time = 90
+
+{%- if compute.message_queue.members is defined %}
+transport_url = rabbit://{% for member in compute.message_queue.members -%}
+ {{ compute.message_queue.user }}:{{ compute.message_queue.password }}@{{ member.host }}:{{ member.get('port', 5672) }}
+ {%- if not loop.last -%},{%- endif -%}
+ {%- endfor -%}
+ {{ compute.message_queue.virtual_host }}
+{%- else %}
+transport_url = rabbit://{{ compute.message_queue.user }}:{{ compute.message_queue.password }}@{{ compute.message_queue.host }}:{{ compute.message_queue.get('port', 5672) }}{{ compute.message_queue.virtual_host }}
+{%- endif %}
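+# Illustrative rendered value, assuming two message_queue members, user
+# "openstack" and virtual_host "/openstack" (all values are placeholders):
+# transport_url = rabbit://openstack:pwd@msg01:5672,openstack:pwd@msg02:5672/openstack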
+
+rpc_backend=rabbit
+
+{% if pillar.ceilometer is defined %}
+instance_usage_audit = True
+instance_usage_audit_period = hour
+{%- endif %}
+
+{%- if compute.get('notification', {}).notify_on is defined %}
+{%- for key, value in compute.notification.notify_on.iteritems() %}
+notify_on_{{ key }} = {{ value }}
+{%- endfor %}
+{%- elif pillar.ceilometer is defined %}
+notify_on_state_change = vm_and_task_state
+{%- endif %}
+
+[oslo_concurrency]
+lock_path = /var/lib/nova/tmp
+
+[oslo_messaging_notifications]
+{%- if compute.notification is mapping %}
+driver = {{ compute.notification.get('driver', 'messagingv2') }}
+{%- if compute.notification.topics is defined %}
+topics = {{ compute.notification.topics }}
+{%- endif %}
+{%- elif compute.notification %}
+driver = messagingv2
+{%- endif %}
+
+[vnc]
+enabled = true
+novncproxy_base_url={{ compute.vncproxy_url }}/vnc_auto.html
+novncproxy_port={{ compute.bind.vnc_port }}
+vncserver_listen=0.0.0.0
+vncserver_proxyclient_address={{ compute.bind.vnc_address }}
+keymap = {{ compute.get('vnc_keymap', 'en-us') }}
+
+[spice]
+enabled = false
+html5proxy_base_url = {{ compute.vncproxy_url }}/spice_auto.html
+
+[cache]
+{%- if compute.cache is defined %}
+backend = oslo_cache.memcache_pool
+enabled = true
+memcache_servers={%- for member in compute.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+[libvirt]
+cpu_mode = host-passthrough
+virt_type = kvm
+inject_partition=-2
+inject_password=False
+disk_cachemodes="network=writeback,block=none"
+libvirt_inject_password=True
+block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_NON_SHARED_INC
+live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST
+libvirt_inject_key=True
+inject_key=False
+vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
+
+{%- if compute.get('ceph', {}).ephemeral is defined %}
+images_type=rbd
+images_rbd_pool={{ compute.ceph.rbd_pool }}
+images_rbd_ceph_conf=/etc/ceph/ceph.conf
+rbd_user={{ compute.ceph.rbd_user }}
+rbd_secret_uuid={{ compute.ceph.secret_uuid }}
+libvirt_inject_password=false
+libvirt_inject_key=false
+{%- endif %}
+
+{%- if compute.get('libvirt', {}).uri is defined %}
+connection_uri={{ compute.libvirt.uri }}
+{%- endif %}
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-nova
+revocation_cache_time = 10
+auth_type = password
+user_domain_id = {{ compute.identity.get('domain', 'default') }}
+project_domain_id = {{ compute.identity.get('domain', 'default') }}
+project_name = {{ compute.identity.tenant }}
+username = {{ compute.identity.user }}
+password = {{ compute.identity.password }}
+auth_uri=http://{{ compute.identity.host }}:5000
+auth_url=http://{{ compute.identity.host }}:35357
+{%- if compute.cache is defined %}
+memcached_servers={%- for member in compute.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+[oslo_messaging_rabbit]
+
+
+[glance]
+api_servers={{ compute.image.host }}:9292
+host={{ compute.image.host }}
+
+[neutron]
+username={{ compute.network.user }}
+password={{ compute.network.password }}
+project_name={{ compute.identity.tenant }}
+auth_url=http://{{ compute.identity.host }}:{{ compute.identity.port }}/v3
+url=http://{{ compute.network.host }}:{{ compute.network.port }}
+region_name= {{ compute.network.region }}
+extension_sync_interval={{ compute.network.get('extension_sync_interval', '600') }}
+auth_type = v3password
+project_domain_name = Default
+user_domain_name = Default
+timeout=30
+
+[cinder]
+os_region_name = {{ compute.identity.region }}
+catalog_info=volumev2:cinderv2:internalURL
+
diff --git a/nova/files/newton/nova-compute.conf.RedHat b/nova/files/newton/nova-compute.conf.RedHat
new file mode 120000
index 0000000..ad4c8f6
--- /dev/null
+++ b/nova/files/newton/nova-compute.conf.RedHat
@@ -0,0 +1 @@
+nova-compute.conf.Debian
\ No newline at end of file
diff --git a/nova/files/newton/nova-controller.conf.Debian b/nova/files/newton/nova-controller.conf.Debian
new file mode 100644
index 0000000..8b73d84
--- /dev/null
+++ b/nova/files/newton/nova-controller.conf.Debian
@@ -0,0 +1,216 @@
+{%- from "nova/map.jinja" import controller with context %}
+[DEFAULT]
+verbose = True
+log-dir = /var/log/nova
+{%- if controller.debug %}
+debug = True
+{%- else %}
+debug = False
+{%- endif %}
+state_path = /var/lib/nova
+volumes_dir = /etc/nova/volumes
+dhcpbridge = /usr/bin/nova-dhcpbridge
+dhcpbridge_flagfile = /etc/nova/nova.conf
+force_dhcp_release = True
+injected_network_template = /usr/share/nova/interfaces.template
+libvirt_nonblocking = True
+vif_plugging_is_fatal = False
+vif_plugging_timeout = 0
+cpu_allocation_ratio = {{ controller.cpu_allocation_ratio }}
+ram_allocation_ratio = {{ controller.ram_allocation_ratio }}
+disk_allocation_ratio = {{ controller.disk_allocation_ratio }}
+scheduler_default_filters = {{ controller.scheduler_default_filters }}
+scheduler_available_filters = nova.scheduler.filters.all_filters
+scheduler_available_filters = nova.scheduler.filters.pci_passthrough_filter.PciPassthroughFilter
+scheduler_driver = filter_scheduler
+scheduler_use_baremetal_filters = False
+allow_resize_to_same_host = True
+osapi_max_limit = {{ controller.osapi_max_limit|default('1000') }}
+notify_api_faults=False
+iscsi_helper=tgtadm
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+api_paste_config=/etc/nova/api-paste.ini
+max_age=0
+scheduler_max_attempts=3
+max_io_ops_per_host=8
+max_instances_per_host=50
+scheduler_host_manager=host_manager
+use_forwarded_for=False
+reservation_expire=86400
+compute_driver = nova.virt.libvirt.LibvirtDriver
+rootwrap_config = /etc/nova/rootwrap.conf
+auth_strategy = keystone
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+enabled_apis = osapi_compute,metadata
+image_service=nova.image.glance.GlanceImageService
+until_refresh=0
+scheduler_host_subset_size=30
+my_ip={{ controller.bind.private_address }}
+fping_path=/usr/bin/fping
+
+
+use_neutron = True
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+
+osapi_volume_listen={{ controller.bind.private_address }}
+osapi_compute_listen={{ controller.bind.private_address }}
+metadata_listen={{ controller.bind.private_address }}
+glance_host = {{ controller.glance.host }}
+osapi_compute_workers = {{ controller.workers }}
+metadata_workers = {{ controller.workers }}
+
+
+allow_resize_to_same_host=True
+start_guests_on_host_boot=true
+
+rpc_cast_timeout = 30
+rpc_response_timeout = 3600
+rpc_thread_pool_size = 70
+report_interval = 5
+
+block_device_allocate_retries=600
+block_device_allocate_retries_interval=10
+
+{%- if controller.message_queue.members is defined %}
+transport_url = rabbit://{% for member in controller.message_queue.members -%}
+ {{ controller.message_queue.user }}:{{ controller.message_queue.password }}@{{ member.host }}:{{ member.get('port', 5672) }}
+ {%- if not loop.last -%},{%- endif -%}
+ {%- endfor -%}
+ {{ controller.message_queue.virtual_host }}
+{%- else %}
+transport_url = rabbit://{{ controller.message_queue.user }}:{{ controller.message_queue.password }}@{{ controller.message_queue.host }}:{{ controller.message_queue.get('port', 5672) }}{{ controller.message_queue.virtual_host }}
+{%- endif %}
+
+rpc_backend=rabbit
+
+[vnc]
+enabled = true
+novncproxy_host = {{ controller.bind.get('novncproxy_address', '0.0.0.0') }}
+novncproxy_base_url = {{ controller.vncproxy_url }}/vnc_auto.html
+novncproxy_port={{ controller.bind.get('vncproxy_port', '6080') }}
+{%- if pillar.nova.compute is defined %}
+vncserver_listen={{ controller.bind.private_address }}
+vncserver_proxyclient_address={{ controller.bind.private_address }}
+{%- else %}
+vncserver_listen={{ controller.bind.get('novncproxy_address', '0.0.0.0') }}
+{%- endif %}
+keymap = {{ controller.get('vnc_keymap', 'en-us') }}
+
+[spice]
+enabled = false
+html5proxy_base_url = {{ controller.vncproxy_url }}/spice_auto.html
+
+[libvirt]
+inject_partition = -1
+use_usb_tablet = True
+cpu_mode = host-passthrough
+virt_type = kvm
+use_virtio_for_bridges = True
+
+[oslo_concurrency]
+
+lock_path = /var/lib/nova/tmp
+
+[oslo_messaging_notifications]
+{%- if controller.notification is mapping %}
+driver = {{ controller.notification.get('driver', 'messagingv2') }}
+{%- if controller.notification.topics is defined %}
+topics = {{ controller.notification.topics }}
+{%- endif %}
+{%- elif controller.notification %}
+driver=messagingv2
+{%- endif %}
+
+[oslo_messaging_rabbit]
+rabbit_retry_interval = 1
+rabbit_retry_backoff = 2
+rpc_conn_pool_size = 300
+
+[cache]
+{%- if controller.cache is defined %}
+enabled = true
+backend = oslo_cache.memcache_pool
+memcache_servers={%- for member in controller.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+[keystone_authtoken]
+revocation_cache_time = 10
+signing_dir=/tmp/keystone-signing-nova
+auth_type = password
+user_domain_id = {{ controller.identity.get('domain', 'default') }}
+project_domain_id = {{ controller.identity.get('domain', 'default') }}
+project_name = {{ controller.identity.tenant }}
+username = {{ controller.identity.user }}
+password = {{ controller.identity.password }}
+auth_uri=http://{{ controller.identity.host }}:5000
+auth_url=http://{{ controller.identity.host }}:35357
+{%- if controller.cache is defined %}
+memcached_servers={%- for member in controller.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+[conductor]
+workers = {{ controller.workers }}
+
+[database]
+idle_timeout = 180
+min_pool_size = 100
+max_pool_size = 700
+max_overflow = 100
+retry_interval = 5
+max_retries = -1
+db_max_retries = 3
+db_retry_interval = 1
+connection_debug = 10
+pool_timeout = 120
+connection = {{ controller.database.engine }}+pymysql://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}?charset=utf8&read_timeout=60
+
+[oslo_middleware]
+enable_proxy_headers_parsing=True
+
+[api_database]
+idle_timeout = 180
+min_pool_size = 100
+max_pool_size = 700
+max_overflow = 100
+retry_interval = 5
+max_retries = -1
+db_max_retries = 3
+db_retry_interval = 1
+connection_debug = 10
+pool_timeout = 120
+connection = {{ controller.database.engine }}+pymysql://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}_api?charset=utf8&read_timeout=60
+
+[glance]
+
+api_servers = {{ controller.glance.host }}
+
+[neutron]
+auth_type=v3password
+project_domain_name = Default
+user_domain_name = Default
+auth_url = http://{{ controller.identity.host }}:35357/v3
+{% if pillar.neutron is defined %}
+password={{ pillar.neutron.server.identity.password }}
+project_name={{ pillar.neutron.server.identity.tenant }}
+username={{ pillar.neutron.server.identity.user }}
+region_name= {{ pillar.neutron.server.identity.region }}
+{%- else %}
+password={{ controller.network.password }}
+project_name={{ controller.network.tenant }}
+username={{ controller.network.user }}
+region_name= {{ controller.network.region }}
+{%- endif %}
+url=http://{{ controller.network.host }}:{{ controller.network.port }}
+
+{%- if controller.get('networking', 'default') != "contrail" %}
+metadata_proxy_shared_secret={{ controller.metadata.password }}
+{%- endif %}
+service_metadata_proxy=True
+
+[cinder]
+os_region_name = {{ controller.identity.region }}
+catalog_info=volumev2:cinderv2:internalURL
+
+[wsgi]
+api_paste_config=/etc/nova/api-paste.ini
diff --git a/nova/files/newton/nova-controller.conf.RedHat b/nova/files/newton/nova-controller.conf.RedHat
new file mode 100644
index 0000000..8b73d84
--- /dev/null
+++ b/nova/files/newton/nova-controller.conf.RedHat
@@ -0,0 +1,216 @@
+{%- from "nova/map.jinja" import controller with context %}
+[DEFAULT]
+verbose = True
+log-dir = /var/log/nova
+{%- if controller.debug %}
+debug = True
+{%- else %}
+debug = False
+{%- endif %}
+state_path = /var/lib/nova
+volumes_dir = /etc/nova/volumes
+dhcpbridge = /usr/bin/nova-dhcpbridge
+dhcpbridge_flagfile = /etc/nova/nova.conf
+force_dhcp_release = True
+injected_network_template = /usr/share/nova/interfaces.template
+libvirt_nonblocking = True
+vif_plugging_is_fatal = False
+vif_plugging_timeout = 0
+cpu_allocation_ratio = {{ controller.cpu_allocation_ratio }}
+ram_allocation_ratio = {{ controller.ram_allocation_ratio }}
+disk_allocation_ratio = {{ controller.disk_allocation_ratio }}
+scheduler_default_filters = {{ controller.scheduler_default_filters }}
+scheduler_available_filters = nova.scheduler.filters.all_filters
+scheduler_available_filters = nova.scheduler.filters.pci_passthrough_filter.PciPassthroughFilter
+scheduler_driver = filter_scheduler
+scheduler_use_baremetal_filters = False
+allow_resize_to_same_host = True
+osapi_max_limit = {{ controller.osapi_max_limit|default('1000') }}
+notify_api_faults=False
+iscsi_helper=tgtadm
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+api_paste_config=/etc/nova/api-paste.ini
+max_age=0
+scheduler_max_attempts=3
+max_io_ops_per_host=8
+max_instances_per_host=50
+scheduler_host_manager=host_manager
+use_forwarded_for=False
+reservation_expire=86400
+compute_driver = nova.virt.libvirt.LibvirtDriver
+rootwrap_config = /etc/nova/rootwrap.conf
+auth_strategy = keystone
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+enabled_apis = osapi_compute,metadata
+image_service=nova.image.glance.GlanceImageService
+until_refresh=0
+scheduler_host_subset_size=30
+my_ip={{ controller.bind.private_address }}
+fping_path=/usr/bin/fping
+
+
+use_neutron = True
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+
+osapi_volume_listen={{ controller.bind.private_address }}
+osapi_compute_listen={{ controller.bind.private_address }}
+metadata_listen={{ controller.bind.private_address }}
+glance_host = {{ controller.glance.host }}
+osapi_compute_workers = {{ controller.workers }}
+metadata_workers = {{ controller.workers }}
+
+
+allow_resize_to_same_host=True
+start_guests_on_host_boot=true
+
+rpc_cast_timeout = 30
+rpc_response_timeout = 3600
+rpc_thread_pool_size = 70
+report_interval = 5
+
+block_device_allocate_retries=600
+block_device_allocate_retries_interval=10
+
+{%- if controller.message_queue.members is defined %}
+transport_url = rabbit://{% for member in controller.message_queue.members -%}
+ {{ controller.message_queue.user }}:{{ controller.message_queue.password }}@{{ member.host }}:{{ member.get('port', 5672) }}
+ {%- if not loop.last -%},{%- endif -%}
+ {%- endfor -%}
+ {{ controller.message_queue.virtual_host }}
+{%- else %}
+transport_url = rabbit://{{ controller.message_queue.user }}:{{ controller.message_queue.password }}@{{ controller.message_queue.host }}:{{ controller.message_queue.get('port', 5672) }}{{ controller.message_queue.virtual_host }}
+{%- endif %}
+
+rpc_backend=rabbit
+
+[vnc]
+enabled = true
+novncproxy_host = {{ controller.bind.get('novncproxy_address', '0.0.0.0') }}
+novncproxy_base_url = {{ controller.vncproxy_url }}/vnc_auto.html
+novncproxy_port={{ controller.bind.get('vncproxy_port', '6080') }}
+{%- if pillar.nova.compute is defined %}
+vncserver_listen={{ controller.bind.private_address }}
+vncserver_proxyclient_address={{ controller.bind.private_address }}
+{%- else %}
+vncserver_listen={{ controller.bind.get('novncproxy_address', '0.0.0.0') }}
+{%- endif %}
+keymap = {{ controller.get('vnc_keymap', 'en-us') }}
+
+[spice]
+enabled = false
+html5proxy_base_url = {{ controller.vncproxy_url }}/spice_auto.html
+
+[libvirt]
+inject_partition = -1
+use_usb_tablet = True
+cpu_mode = host-passthrough
+virt_type = kvm
+use_virtio_for_bridges = True
+
+[oslo_concurrency]
+
+lock_path = /var/lib/nova/tmp
+
+[oslo_messaging_notifications]
+{%- if controller.notification is mapping %}
+driver = {{ controller.notification.get('driver', 'messagingv2') }}
+{%- if controller.notification.topics is defined %}
+topics = {{ controller.notification.topics }}
+{%- endif %}
+{%- elif controller.notification %}
+driver=messagingv2
+{%- endif %}
+
+[oslo_messaging_rabbit]
+rabbit_retry_interval = 1
+rabbit_retry_backoff = 2
+rpc_conn_pool_size = 300
+
+[cache]
+{%- if controller.cache is defined %}
+enabled = true
+backend = oslo_cache.memcache_pool
+memcache_servers={%- for member in controller.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+[keystone_authtoken]
+revocation_cache_time = 10
+signing_dir=/tmp/keystone-signing-nova
+auth_type = password
+user_domain_id = {{ controller.identity.get('domain', 'default') }}
+project_domain_id = {{ controller.identity.get('domain', 'default') }}
+project_name = {{ controller.identity.tenant }}
+username = {{ controller.identity.user }}
+password = {{ controller.identity.password }}
+auth_uri=http://{{ controller.identity.host }}:5000
+auth_url=http://{{ controller.identity.host }}:35357
+{%- if controller.cache is defined %}
+memcached_servers={%- for member in controller.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+[conductor]
+workers = {{ controller.workers }}
+
+[database]
+idle_timeout = 180
+min_pool_size = 100
+max_pool_size = 700
+max_overflow = 100
+retry_interval = 5
+max_retries = -1
+db_max_retries = 3
+db_retry_interval = 1
+connection_debug = 10
+pool_timeout = 120
+connection = {{ controller.database.engine }}+pymysql://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}?charset=utf8&read_timeout=60
+
+[oslo_middleware]
+enable_proxy_headers_parsing=True
+
+[api_database]
+idle_timeout = 180
+min_pool_size = 100
+max_pool_size = 700
+max_overflow = 100
+retry_interval = 5
+max_retries = -1
+db_max_retries = 3
+db_retry_interval = 1
+connection_debug = 10
+pool_timeout = 120
+connection = {{ controller.database.engine }}+pymysql://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}_api?charset=utf8&read_timeout=60
+
+[glance]
+
+api_servers = {{ controller.glance.host }}
+
+[neutron]
+auth_type=v3password
+project_domain_name = Default
+user_domain_name = Default
+auth_url = http://{{ controller.identity.host }}:35357/v3
+{% if pillar.neutron is defined %}
+password={{ pillar.neutron.server.identity.password }}
+project_name={{ pillar.neutron.server.identity.tenant }}
+username={{ pillar.neutron.server.identity.user }}
+region_name= {{ pillar.neutron.server.identity.region }}
+{%- else %}
+password={{ controller.network.password }}
+project_name={{ controller.network.tenant }}
+username={{ controller.network.user }}
+region_name= {{ controller.network.region }}
+{%- endif %}
+url=http://{{ controller.network.host }}:{{ controller.network.port }}
+
+{%- if controller.get('networking', 'default') != "contrail" %}
+metadata_proxy_shared_secret={{ controller.metadata.password }}
+{%- endif %}
+service_metadata_proxy=True
+
+[cinder]
+os_region_name = {{ controller.identity.region }}
+catalog_info=volumev2:cinderv2:internalURL
+
+[wsgi]
+api_paste_config=/etc/nova/api-paste.ini
diff --git a/nova/files/newton/qemu.conf.Debian b/nova/files/newton/qemu.conf.Debian
new file mode 100644
index 0000000..5b039c4
--- /dev/null
+++ b/nova/files/newton/qemu.conf.Debian
@@ -0,0 +1,486 @@
+{%- from "nova/map.jinja" import compute with context %}
+# Master configuration file for the QEMU driver.
+# All settings described here are optional - if omitted, sensible
+# defaults are used.
+
+# VNC is configured to listen on 127.0.0.1 by default.
+# To make it listen on all public interfaces, uncomment
+# this next option.
+#
+# NB, strong recommendation to enable TLS + x509 certificate
+# verification when allowing public access
+#
+#vnc_listen = "0.0.0.0"
+
+# Enable this option to have VNC served over an automatically created
+# unix socket. This prevents unprivileged access from users on the
+# host machine, though most VNC clients do not support it.
+#
+# This will only be enabled for VNC configurations that do not have
+# a hardcoded 'listen' or 'socket' value. This setting takes preference
+# over vnc_listen.
+#
+#vnc_auto_unix_socket = 1
+
+# Enable use of TLS encryption on the VNC server. This requires
+# a VNC client which supports the VeNCrypt protocol extension.
+# Examples include vinagre, virt-viewer, virt-manager and vencrypt
+# itself. UltraVNC, RealVNC, TightVNC do not support this
+#
+# It is necessary to setup CA and issue a server certificate
+# before enabling this.
+#
+#vnc_tls = 1
+
+
+# Use of TLS requires that x509 certificates be issued. The
+# default is to keep them in /etc/pki/libvirt-vnc. This directory
+# must contain
+#
+# ca-cert.pem - the CA master certificate
+# server-cert.pem - the server certificate signed with ca-cert.pem
+# server-key.pem - the server private key
+#
+# This option allows the certificate directory to be changed
+#
+#vnc_tls_x509_cert_dir = "/etc/pki/libvirt-vnc"
+
+
+# The default TLS configuration only uses certificates for the server
+# allowing the client to verify the server's identity and establish
+# an encrypted channel.
+#
+# It is possible to use x509 certificates for authentication too, by
+# issuing a x509 certificate to every client who needs to connect.
+#
+# Enabling this option will reject any client who does not have a
+# certificate signed by the CA in /etc/pki/libvirt-vnc/ca-cert.pem
+#
+#vnc_tls_x509_verify = 1
+
+
+# The default VNC password. Only 8 bytes are significant for
+# VNC passwords. This parameter is only used if the per-domain
+# XML config does not already provide a password. To allow
+# access without passwords, leave this commented out. An empty
+# string will still enable passwords, but be rejected by QEMU,
+# effectively preventing any use of VNC. Obviously change this
+# example here before you set this.
+#
+#vnc_password = "XYZ12345"
+
+
+# Enable use of SASL encryption on the VNC server. This requires
+# a VNC client which supports the SASL protocol extension.
+# Examples include vinagre, virt-viewer and virt-manager
+# itself. UltraVNC, RealVNC, TightVNC do not support this
+#
+# It is necessary to configure /etc/sasl2/qemu.conf to choose
+# the desired SASL plugin (eg, GSSPI for Kerberos)
+#
+#vnc_sasl = 1
+
+
+# The default SASL configuration file is located in /etc/sasl2/
+# When running libvirtd unprivileged, it may be desirable to
+# override the configs in this location. Set this parameter to
+# point to the directory, and create a qemu.conf in that location
+#
+#vnc_sasl_dir = "/some/directory/sasl2"
+
+
+# QEMU implements an extension for providing audio over a VNC connection,
+# though if your VNC client does not support it, your only chance for getting
+# sound output is through regular audio backends. By default, libvirt will
+# disable all QEMU sound backends if using VNC, since they can cause
+# permissions issues. Enabling this option will make libvirtd honor the
+# QEMU_AUDIO_DRV environment variable when using VNC.
+#
+#vnc_allow_host_audio = 0
+
+
+
+# SPICE is configured to listen on 127.0.0.1 by default.
+# To make it listen on all public interfaces, uncomment
+# this next option.
+#
+# NB, strong recommendation to enable TLS + x509 certificate
+# verification when allowing public access
+#
+#spice_listen = "0.0.0.0"
+
+
+# Enable use of TLS encryption on the SPICE server.
+#
+# It is necessary to setup CA and issue a server certificate
+# before enabling this.
+#
+#spice_tls = 1
+
+
+# Use of TLS requires that x509 certificates be issued. The
+# default is to keep them in /etc/pki/libvirt-spice. This directory
+# must contain
+#
+# ca-cert.pem - the CA master certificate
+# server-cert.pem - the server certificate signed with ca-cert.pem
+# server-key.pem - the server private key
+#
+# This option allows the certificate directory to be changed.
+#
+#spice_tls_x509_cert_dir = "/etc/pki/libvirt-spice"
+
+
+# The default SPICE password. This parameter is only used if the
+# per-domain XML config does not already provide a password. To
+# allow access without passwords, leave this commented out. An
+# empty string will still enable passwords, but be rejected by
+# QEMU, effectively preventing any use of SPICE. Obviously change
+# this example here before you set this.
+#
+#spice_password = "XYZ12345"
+
+
+# Enable use of SASL encryption on the SPICE server. This requires
+# a SPICE client which supports the SASL protocol extension.
+#
+# It is necessary to configure /etc/sasl2/qemu.conf to choose
+# the desired SASL plugin (eg, GSSPI for Kerberos)
+#
+#spice_sasl = 1
+
+# The default SASL configuration file is located in /etc/sasl2/
+# When running libvirtd unprivileged, it may be desirable to
+# override the configs in this location. Set this parameter to
+# point to the directory, and create a qemu.conf in that location
+#
+#spice_sasl_dir = "/some/directory/sasl2"
+
+
+# By default, if no graphical front end is configured, libvirt will disable
+# QEMU audio output since directly talking to alsa/pulseaudio may not work
+# with various security settings. If you know what you're doing, enable
+# the setting below and libvirt will pass through the QEMU_AUDIO_DRV
+# environment variable when using nographics.
+#
+#nographics_allow_host_audio = 1
+
+
+# Override the port for creating both VNC and SPICE sessions (min).
+# This defaults to 5900 and increases for consecutive sessions
+# or when ports are occupied, until it hits the maximum.
+#
+# Minimum must be greater than or equal to 5900 as a lower number would
+# result in a negative VNC display number.
+#
+# Maximum must be less than 65536, because higher numbers do not make
+# sense as a port number.
+#
+#remote_display_port_min = 5900
+#remote_display_port_max = 65535
+
+# VNC WebSocket port policies, same rules apply as with remote display
+# ports. VNC WebSockets use similar display <-> port mappings, with
+# the exception being that ports start from 5700 instead of 5900.
+#
+#remote_websocket_port_min = 5700
+#remote_websocket_port_max = 65535
+
+# The default security driver is SELinux. If SELinux is disabled
+# on the host, then the security driver will automatically disable
+# itself. If you wish to disable QEMU SELinux security driver while
+# leaving SELinux enabled for the host in general, then set this
+# to 'none' instead. It's also possible to use more than one security
+# driver at the same time, for this use a list of names separated by
+# comma and delimited by square brackets. For example:
+#
+# security_driver = [ "selinux", "apparmor" ]
+#
+# Notes: The DAC security driver is always enabled; as a result, the
+# value of security_driver cannot contain "dac". The value "none" is
+# a special value; security_driver can be set to that value in
+# isolation, but it cannot appear in a list of drivers.
+#
+#security_driver = "selinux"
+
+# If set to non-zero, then the default security labeling
+# will make guests confined. If set to zero, then guests
+# will be unconfined by default. Defaults to 1.
+#security_default_confined = 1
+
+# If set to non-zero, then attempts to create unconfined
+# guests will be blocked. Defaults to 0.
+#security_require_confined = 1
+
+# The user for QEMU processes run by the system instance. It can be
+# specified as a user name or as a user id. The qemu driver will try to
+# parse this value first as a name and then, if the name doesn't exist,
+# as a user id.
+#
+# Since a sequence of digits is a valid user name, a leading plus sign
+# can be used to ensure that a user id will not be interpreted as a user
+# name.
+#
+# Some examples of valid values are:
+#
+# user = "qemu" # A user named "qemu"
+# user = "+0" # Super user (uid=0)
+# user = "100" # A user named "100" or a user with uid=100
+#
+#user = "root"
+
+# The group for QEMU processes run by the system instance. It can be
+# specified in a similar way to user.
+#group = "root"
+
+# Whether libvirt should dynamically change file ownership
+# to match the configured user/group above. Defaults to 1.
+# Set to 0 to disable file ownership changes.
+#dynamic_ownership = 1
+
+
+# What cgroup controllers to make use of with QEMU guests
+#
+# - 'cpu' - use for scheduler tunables
+# - 'devices' - use for device whitelisting
+# - 'memory' - use for memory tunables
+# - 'blkio' - use for block devices I/O tunables
+# - 'cpuset' - use for CPUs and memory nodes
+# - 'cpuacct' - use for CPUs statistics.
+#
+# NB, even if configured here, they won't be used unless
+# the administrator has mounted cgroups, e.g.:
+#
+# mkdir /dev/cgroup
+# mount -t cgroup -o devices,cpu,memory,blkio,cpuset none /dev/cgroup
+#
+# They can be mounted anywhere, and different controllers
+# can be mounted in different locations. libvirt will detect
+# where they are located.
+#
+#cgroup_controllers = [ "cpu", "devices", "memory", "blkio", "cpuset", "cpuacct" ]
+
+# This is the basic set of devices allowed / required by
+# all virtual machines.
+#
+# As well as this, any configured block backed disks,
+# all sound devices, and all PTY devices are allowed.
+#
+# This will only need setting if newer QEMU suddenly
+# wants some device we don't already know about.
+#
+#cgroup_device_acl = [
+# "/dev/null", "/dev/full", "/dev/zero",
+# "/dev/random", "/dev/urandom",
+# "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+# "/dev/rtc","/dev/hpet", "/dev/vfio/vfio"
+#]
+
+
+# The default format for Qemu/KVM guest save images is raw; that is, the
+# memory from the domain is dumped out directly to a file. If you have
+# guests with a large amount of memory, however, this can take up quite
+# a bit of space. If you would like to compress the images while they
+# are being saved to disk, you can also set "lzop", "gzip", "bzip2", or "xz"
+# for save_image_format. Note that this means you slow down the process of
+# saving a domain in order to save disk space; the list above is in descending
+# order by performance and ascending order by compression ratio.
+#
+# save_image_format is used when you use 'virsh save' or 'virsh managedsave'
+# at scheduled saving, and it is an error if the specified save_image_format
+# is not valid, or the requested compression program can't be found.
+#
+# dump_image_format is used when you use 'virsh dump' at emergency
+# crashdump, and if the specified dump_image_format is not valid, or
+# the requested compression program can't be found, this falls
+# back to "raw" compression.
+#
+# snapshot_image_format specifies the compression algorithm of the memory save
+# image when an external snapshot of a domain is taken. This does not apply
+# on disk image format. It is an error if the specified format isn't valid,
+# or the requested compression program can't be found.
+#
+#save_image_format = "raw"
+#dump_image_format = "raw"
+#snapshot_image_format = "raw"
+
+# When a domain is configured to be auto-dumped when libvirtd receives a
+# watchdog event from qemu guest, libvirtd will save dump files in directory
+# specified by auto_dump_path. Default value is /var/lib/libvirt/qemu/dump
+#
+#auto_dump_path = "/var/lib/libvirt/qemu/dump"
+
+# When a domain is configured to be auto-dumped, enabling this flag
+# has the same effect as using the VIR_DUMP_BYPASS_CACHE flag with the
+# virDomainCoreDump API. That is, the system will avoid using the
+# file system cache while writing the dump file, but may cause
+# slower operation.
+#
+#auto_dump_bypass_cache = 0
+
+# When a domain is configured to be auto-started, enabling this flag
+# has the same effect as using the VIR_DOMAIN_START_BYPASS_CACHE flag
+# with the virDomainCreateWithFlags API. That is, the system will
+# avoid using the file system cache when restoring any managed state
+# file, but may cause slower operation.
+#
+#auto_start_bypass_cache = 0
+
+# If provided by the host and a hugetlbfs mount point is configured,
+# a guest may request huge page backing. When this mount point is
+# unspecified here, determination of a host mount point in /proc/mounts
+# will be attempted. Specifying an explicit mount overrides detection
+# of the same in /proc/mounts. Setting the mount point to "" will
+# disable guest hugepage backing.
+#
+# NB, within this mount point, guests will create memory backing files
+# in a location of $MOUNTPOINT/libvirt/qemu
+#
+#hugetlbfs_mount = "/dev/hugepages"
+#hugetlbfs_mount = ["/run/hugepages/kvm", "/mnt/hugepages_1GB"]
+{%- if compute.hugepages is defined %}
+hugetlbfs_mount = [{%- for mount in compute.hugepages.mount_points %}"{{ mount.path }}"{% if not loop.last %}, {% endif %}{%- endfor %}]
+security_driver="none"
+{%- endif %}
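+# Illustrative rendered value, assuming two hugepages mount_points in the
+# pillar, /mnt/hugepages_1G and /mnt/hugepages_2M (placeholders):
+# hugetlbfs_mount = ["/mnt/hugepages_1G", "/mnt/hugepages_2M"]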
+
+# Path to the setuid helper for creating tap devices. This executable
+# is used to create <source type='bridge'> interfaces when libvirtd is
+# running unprivileged. libvirt invokes the helper directly, instead
+# of using "-netdev bridge", for security reasons.
+#bridge_helper = "/usr/libexec/qemu-bridge-helper"
+
+
+
+# If clear_emulator_capabilities is enabled, libvirt will drop all
+# privileged capabilities of the QEmu/KVM emulator. This is enabled by
+# default.
+#
+# Warning: Disabling this option means that a compromised guest can
+# exploit the privileges and possibly do damage to the host.
+#
+#clear_emulator_capabilities = 1
+
+
+# If enabled, libvirt will have QEMU set its process name to
+# "qemu:VM_NAME", where VM_NAME is the name of the VM. The QEMU
+# process will appear as "qemu:VM_NAME" in process listings and
+# other system monitoring tools. By default, QEMU does not set
+# its process title, so the complete QEMU command (emulator and
+# its arguments) appear in process listings.
+#
+#set_process_name = 1
+
+
+# If max_processes is set to a positive integer, libvirt will use
+# it to set the maximum number of processes that can be run by the qemu
+# user. This can be used to override the default value set by the host OS.
+# The same applies to max_files which sets the limit on the maximum
+# number of opened files.
+#
+{%- if compute.qemu is defined %}
+
+{%- if compute.qemu.max_processes is defined %}
+max_processes = {{ compute.qemu.max_processes }}
+{%- endif %}
+
+{%- if compute.qemu.max_files is defined %}
+max_files = {{ compute.qemu.max_files }}
+{%- endif %}
+
+{%- endif %}
+
+# mac_filter enables MAC addressed based filtering on bridge ports.
+# This currently requires ebtables to be installed.
+#
+#mac_filter = 1
+
+
+# By default, PCI devices below non-ACS switch are not allowed to be assigned
+# to guests. By setting relaxed_acs_check to 1 such devices will be allowed to
+# be assigned to guests.
+#
+#relaxed_acs_check = 1
+
+
+# If allow_disk_format_probing is enabled, libvirt will probe disk
+# images to attempt to identify their format, when not otherwise
+# specified in the XML. This is disabled by default.
+#
+# WARNING: Enabling probing is a security hole in almost all
+# deployments. It is strongly recommended that users update their
+# guest XML <disk> elements to include <driver type='XXXX'/>
+# elements instead of enabling this option.
+#
+#allow_disk_format_probing = 1
+
+
+# To enable 'Sanlock' project based locking of the file
+# content (to prevent two VMs writing to the same
+# disk), uncomment this
+#
+#lock_manager = "sanlock"
+
+
+
+# Set limit of maximum APIs queued on one domain. All other APIs
+# over this threshold will fail on acquiring job lock. Specially,
+# setting to zero turns this feature off.
+# Note, that job lock is per domain.
+#
+#max_queued = 0
+
+###################################################################
+# Keepalive protocol:
+# This allows qemu driver to detect broken connections to remote
+# libvirtd during peer-to-peer migration. A keepalive message is
+# sent to the daemon after keepalive_interval seconds of inactivity
+# to check if the daemon is still responding; keepalive_count is a
+# maximum number of keepalive messages that are allowed to be sent
+# to the daemon without getting any response before the connection
+# is considered broken. In other words, the connection is
+# automatically closed approximately after
+# keepalive_interval * (keepalive_count + 1) seconds since the last
+# message received from the daemon. If keepalive_interval is set to
+# -1, qemu driver will not send keepalive requests during
+# peer-to-peer migration; however, the remote libvirtd can still
+# send them and source libvirtd will send responses. When
+# keepalive_count is set to 0, connections will be automatically
+# closed after keepalive_interval seconds of inactivity without
+# sending any keepalive messages.
+#
+#keepalive_interval = 5
+#keepalive_count = 5
+
+
+
+# Use seccomp syscall whitelisting in QEMU.
+# 1 = on, 0 = off, -1 = use QEMU default
+# Defaults to -1.
+#
+#seccomp_sandbox = 1
+
+
+
+# Override the listen address for all incoming migrations. Defaults to
+# 0.0.0.0, or :: if both host and qemu are capable of IPv6.
+#migration_address = "127.0.0.1"
+
+
+# Override the port range used for incoming migrations.
+#
+# Minimum must be greater than 0, however when QEMU is not running as root,
+# setting the minimum to be lower than 1024 will not work.
+#
+# Maximum must not be greater than 65535.
+#
+#migration_port_min = 49152
+#migration_port_max = 49215
+cgroup_device_acl = [
+    "/dev/null", "/dev/full", "/dev/zero",
+    "/dev/random", "/dev/urandom",
+    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+    "/dev/rtc", "/dev/hpet", "/dev/net/tun"
+    {%- if compute.get('sriov', false) %},
+    "/dev/vfio/vfio"
+    {%- endif %}
+]
diff --git a/nova/files/newton/qemu.conf.RedHat b/nova/files/newton/qemu.conf.RedHat
new file mode 120000
index 0000000..1d23f19
--- /dev/null
+++ b/nova/files/newton/qemu.conf.RedHat
@@ -0,0 +1 @@
+qemu.conf.Debian
\ No newline at end of file